Repository: unias/docklet Branch: master Commit: 70c089a6a5bb Files: 185 Total size: 1.3 MB Directory structure: gitextract_vecf3u3x/ ├── .gitignore ├── CHANGES ├── LICENSE ├── README.md ├── VERSION ├── bin/ │ ├── docklet-master │ ├── docklet-supermaster │ └── docklet-worker ├── cloudsdk-installer.sh ├── conf/ │ ├── container/ │ │ ├── lxc2.container.batch.conf │ │ ├── lxc2.container.conf │ │ ├── lxc3.container.batch.conf │ │ └── lxc3.container.conf │ ├── docklet.conf.template │ ├── lxc-script/ │ │ ├── lxc-ifdown │ │ ├── lxc-ifup │ │ ├── lxc-mount │ │ └── lxc-prestart │ └── nginx_docklet.conf ├── doc/ │ ├── devdoc/ │ │ ├── coding.md │ │ ├── config_info.md │ │ ├── network-arch.md │ │ ├── networkmgr.md │ │ ├── openvswitch-vlan.md │ │ ├── proxy-control.md │ │ └── startup.md │ ├── devguide/ │ │ └── devguide.md │ └── example/ │ └── example-LogisticRegression.py ├── meter/ │ ├── connector/ │ │ ├── master.py │ │ └── minion.py │ ├── daemon/ │ │ ├── http.py │ │ ├── master_v1.py │ │ └── minion_v1.py │ ├── intra/ │ │ ├── billing.py │ │ ├── cgroup.py │ │ ├── smart.py │ │ └── system.py │ ├── main.py │ └── policy/ │ ├── allocate.py │ └── quota.py ├── prepare.sh ├── src/ │ ├── master/ │ │ ├── beansapplicationmgr.py │ │ ├── bugreporter.py │ │ ├── cloudmgr.py │ │ ├── deploy.py │ │ ├── httprest.py │ │ ├── jobmgr.py │ │ ├── lockmgr.py │ │ ├── monitor.py │ │ ├── network.py │ │ ├── nodemgr.py │ │ ├── notificationmgr.py │ │ ├── parser.py │ │ ├── releasemgr.py │ │ ├── settings.py │ │ ├── sysmgr.py │ │ ├── taskmgr.py │ │ ├── testTaskCtrler.py │ │ ├── testTaskMgr.py │ │ ├── testTaskWorker.py │ │ ├── userManager.py │ │ ├── userinit.sh │ │ └── vclustermgr.py │ ├── protos/ │ │ ├── rpc.proto │ │ ├── rpc_pb2.py │ │ └── rpc_pb2_grpc.py │ ├── utils/ │ │ ├── env.py │ │ ├── etcdlib.py │ │ ├── gputools.py │ │ ├── imagemgr.py │ │ ├── log.py │ │ ├── logs.py │ │ ├── lvmtool.py │ │ ├── manage.py │ │ ├── model.py │ │ ├── nettools.py │ │ ├── proxytool.py │ │ ├── tools.py │ │ └── updatebase.py │ └── worker/ 
│ ├── container.py │ ├── monitor.py │ ├── ossmounter.py │ ├── taskcontroller.py │ ├── taskworker.py │ └── worker.py ├── tools/ │ ├── DOCKLET_NOTES.txt │ ├── R_demo.ipynb │ ├── alterUserTable.py │ ├── clean-usage.py │ ├── cloudsetting.aliyun.template.json │ ├── dl_start_spark.sh │ ├── dl_stop_spark.sh │ ├── docklet-deploy.sh │ ├── etcd-multi-nodes.sh │ ├── etcd-one-node.sh │ ├── nginx_config.sh │ ├── npmrc │ ├── pip.conf │ ├── python_demo.ipynb │ ├── resolv.conf │ ├── sources.list │ ├── start_jupyter.sh │ ├── update-UserTable.sh │ ├── update-basefs.sh │ ├── update_baseurl.sh │ ├── update_con_network.py │ ├── update_v0.3.2.py │ ├── upgrade.py │ ├── upgrade_file2db.py │ └── vimrc.local ├── user/ │ ├── stopreqmgr.py │ └── user.py └── web/ ├── static/ │ ├── css/ │ │ └── docklet.css │ ├── dist/ │ │ ├── css/ │ │ │ ├── AdminLTE.css │ │ │ ├── filebox.css │ │ │ ├── flotconfig.css │ │ │ ├── modalconfig.css │ │ │ └── skins/ │ │ │ ├── _all-skins.css │ │ │ └── skin-blue.css │ │ └── js/ │ │ └── app.js │ └── js/ │ ├── plot_monitor.js │ └── plot_monitorReal.js ├── templates/ │ ├── addCluster.html │ ├── base_AdminLTE.html │ ├── batch/ │ │ ├── batch_admin_list.html │ │ ├── batch_create.html │ │ ├── batch_info.html │ │ ├── batch_list.html │ │ └── batch_output.html │ ├── beansapplication.html │ ├── cloud.html │ ├── config.html │ ├── create_notification.html │ ├── dashboard.html │ ├── description.html │ ├── error/ │ │ ├── 401.html │ │ └── 500.html │ ├── error.html │ ├── home.template │ ├── listcontainer.html │ ├── login.html │ ├── logs.html │ ├── monitor/ │ │ ├── history.html │ │ ├── historyVNode.html │ │ ├── hosts.html │ │ ├── hostsConAll.html │ │ ├── hostsRealtime.html │ │ ├── monitorUserAll.html │ │ ├── monitorUserCluster.html │ │ ├── status.html │ │ └── statusRealtime.html │ ├── notification.html │ ├── notification_info.html │ ├── opfailed.html │ ├── opsuccess.html │ ├── register.html │ ├── saveconfirm.html │ ├── settings.html │ ├── user/ │ │ ├── activate.html │ │ ├── info.html │ │ 
└── mailservererror.html │ └── user_list.html ├── web.py └── webViews/ ├── admin.py ├── authenticate/ │ ├── auth.py │ ├── login.py │ └── register.py ├── batch.py ├── beansapplication.py ├── checkname.py ├── cloud.py ├── cluster.py ├── cookie_tool.py ├── dashboard.py ├── dockletrequest.py ├── log.py ├── monitor.py ├── notification/ │ └── notification.py ├── reportbug.py ├── syslogs.py ├── user/ │ ├── grouplist.py │ ├── userActivate.py │ ├── userinfo.py │ └── userlist.py └── view.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ __pycache__ *.pyc *.swp __temp *~ .DS_Store docklet.conf home.html src/utils/migrations/ container.conf container.batch.conf ================================================ FILE: CHANGES ================================================ v0.4.0, May 26, 2019 -------------------- **Bug Fix** * Fix a bug of update base image. * Fix a bug of port control & a bug of update_v0.3.2.py. * Add locks to solve synchronization problems. * Fix a type error in web/web.py. * Fix a bug that net stats can't be shown. **Improvement** * [#298 #299 #300 ] Support batch computing. * Add information of login to user log and database. * Prevent users that is not activated from applying for beans. * Aggregate api of monitor at the backend and aggregate http request on status realtime pages for monitor information. * Support user to report a bug in dashboard. * Display image size when creating vcluster. * Security enhancement: forbid double slashes url, add header into nginx to defend clickjacking, add CsrfProtect, forbid methods except for GET and POST in nginx and support https... * Add LoginFailMsg into model & Ban user if he input wrong password for many times. * Add UDP4 mapping for iptables. * Support migrating containers. 
* Support releasing vcluster when it is stopped for too long automatically. v0.3.2, Dec 11, 2017 -------------------- **Bug Fix** * Fix the problem that some monitoring data are used before initialnizing. * Add some error message when start service failed. * Add npm registry. **Improvement** * [#277] Support egress and ingress qos rate limiting. * [#277] Support network and ports mappings billings. * Support network monitoring. * Limit the number of users' vnodes by ip addresses. * Add billing detail and billing history detail * Replace lxc-info with lxc.Container.get_cgroup_item() v0.3.0, Sep 29, 2017 -------------------- **Bug Fix** * [#180] generated_password file no exist after master init * Release ip when create container failed. **Improvement** * [#16] display file size, modification time in jupyter notebook * [#86] time display in UserList * [#87] add a new panel to approve or decline user activation requests * [#121] Autofilling may lead to a bug that makes local users cannot login * [#178] record and display history of all containers * [#210] rename Dashboard to Workspace * [#212] add docklet hyperlink in web portal * Separate user module from master. * Support multiple masters run in the same time, and the user can choose which to use in webpage. * Support distributed gateway, if enable, worker will setup its own gateway. * Support user gateway. 
v0.2.8, Jul 28, 2016 -------------------- **Bug Fix** * [#119] version display error **Improvement** * [#52] give user a total quota, let themselves decide how to use quota * [#72] recording the user's historical resource usage * [#85] Making workers's state consistent with master * [#88] setting config file in admin panel * [#96] Web notifications * [#113] Recovery : after poweroff, just recover container, not recover service v0.2.7, May 17, 2016 -------------------- **Bug Fix** * [#9] updating user profile taking effects immediately * [#12] logging user's activity * [#14] Can't stop vcluster by dashboard page * [#18] subprocess call should check return status * [#19] lxc config string in config file is limited in 16 bytes * [#25] bug of external login * [#30] support lxc.custom.conf in appending * [#35] nfs mountpoint bug in imagemgr.py * [#49] Fail to create container * [#57] status page of normal user failed * [#68] Not Found error when just click "Sign in" Button * [#76] unable to show and edit user table in smartphone **Improvement** * [#7] enhance quota management * [#8] independent starting of master and workers * [#20] check typing and input on web pages and web server * [#23] add LXCFS for container * [#41] move system data to global/sys * [#42] check IP and network pool when releasing IPs * [#48] token expires after some time * [#54] display container owner * [#61] rewrite httprest.py using flask routing **Notes** * If you upgrade from former version, please run tools/upgrade.py first. v0.2.6, Mar 31, 2016 -------------------- An initial release on github.com * Using the open source AdminLTE theme ================================================ FILE: LICENSE ================================================ Copyright (c) 2016, Peking University (PKU). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the PKU nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.INCLUDING NEGLIGENCE OR OTHERWISE ================================================ FILE: README.md ================================================ # Docklet https://unias.github.io/docklet ## Intro Docklet is an operating system for virtual private cloud. Its goal is to help a user group effectively share cluster resources in physical datacenter or in the cloud. In Docklet, the shared resources are organized and managed as a virtual private cloud among the user group. Every user has their own private **virtual cluster (vcluster)**, which consists of a number of virtual Linux container nodes distributed over the physical cluster. Each vcluster is separated from others and can be operated like a real physical cluster. 
Therefore, most applications, especially those requiring a cluster of resources can run in vcluster seamlessly. Users manage and use their vcluster resources all through web. The supported resources include CPUs, GPUs, shared storage, etc. The only client tool needed is a modern web browser supporting HTML5, like Safari, Firefox, or Chrome. The integrated *jupyter notebook* provides a web **Workspace**. Users can code, debug, test, and run their programs, even visualize the outputs online. Serverless computing and batch processing is supported. Docklet creates virtual nodes from a base image. Admins can pre-install development tools and frameworks according to their interests. The users are also free to install their specific software in their vcluster. Docklet only need **one** public IP address. The vclusters are configured to use private IP address range, e.g., 172.16.0.0/16, 192.168.0.0/16, 10.0.0.0/8. A proxy is setup to help users visit their vclusters behind the firewall/gateway. ## Architecture The Docklet system runtime consists of four main components: - distributed file system server - etcd server - docklet supermaster, master - docklet worker ![](./docklet-arch.png) For detailed information about configurations, please see [Config](#config). ## Install Currently the Docklet system is recommend to run in Ubuntu 15.10+. Ensure that python3.5 is the default python3 version. Clone Docklet from github ``` git clone https://github.com/unias/docklet.git ``` Run **prepare.sh** from console to install depended packages and generate necessary configurations. A *root* users will be created for managing the Docklet system. The password is recorded in `FS_PREFIX/local/generated_password.txt` . ## Config ## The main configuration file of docklet is conf/docklet.conf. Most default setting works for a single host environment. First copy docklet.conf.template to get docklet.conf. Pay attention to the following settings: - NETWORK_DEVICE : the network interface to use. 
- ETCD : the etcd server address. For distributed multi hosts environment, it should be one of the ETCD public server address. For single host environment, the default value should be OK. - STORAGE : using disk or file to storage persistent data, for single host, file is convenient. - FS_PREFIX: the working dir of docklet runtime. default is /opt/docklet. - CLUSTER_NET: the vcluster network ip address range, default is 172.16.0.1/16. This network range should all be allocated to and managed by docklet. - PROXY_PORT : the listening port of configurable-http-proxy. It proxy connections from external public network to internal private container networks. - PORTAL_URL : the portal of the system. Users access the system by visiting this address. If the system is behind a firewall, then a reverse proxy should be setup. Default is MASTER_IP:NGINX_PORT. - NGINX_PORT : the access port of the public portal. User use this port to visit docklet system. - DISTRIBUTED_GATEWAY : whether the users' gateways are distributed or not. Both master and worker must be set by same value. - PUBLIC_IP : public ip of this machine. If DISTRIBUTED_GATEWAY is True, users' gateways can be setup on this machine. Users can visit this machine by the public ip. default: IP of NETWORK_DEVICE. - USER_IP : the ip of user server. default : localhost - MASTER_IPS : tell the web server the ips of all the cluster master. - AUTH_KEY: the key to request users server from master, or to request master from users server. Please set the same value on each machine. Please don't use the default value. ## Start ## ### distributed file system ### For multi hosts distributed environment, a distributed file system is needed to store global data. Currently, glusterfs has been tested. Lets presume the file system server export filesystem as nfs **fileserver:/pub** : In each physical host to run docklet, mount **fileserver:/pub** to **FS_PEFIX/global** . For single host environment, nothing to do. 
### etcd ### For single host environment, start **tools/etcd-one-node.sh** . Some recent Ubuntu releases have included **etcd** in the repository, just `apt-get install etcd`, and it need not to start etcd manually. For others, you should install etcd manually. For multi hosts distributed environment, **must** start **tools/etcd-multi-nodes.sh** in each etcd server hosts. This scripts requires users providing the etcd server address as parameters. ### supermaster ### Supermaster is a server consist of web server, user server and a master server instance. If it is the first time you start docklet, run `bin/docklet-supermaster init` to init and start a docklet master, web server and user server. Otherwise, run `bin/docklet-supermaster start`. When you start a supermaster, you don't need to start an extra master in the same cluster. ### master ### A master manages all the workers in one data center. Docklet can manage several data centers, each data center has one master server. But a docklet system will only have one supermaster. First, select a server with 2 network interface card, one having a public IP address/url, e.g., docklet.info; the other having a private IP address, e.g., 172.16.0.1. This server will be the master. If it is the first time you start docklet, run `bin/docklet-master init` to init and start docklet master. Otherwise, run `bin/docklet-master start`, which will start master in recovery mode in background using conf/docklet.conf. (Note: if docklet will run in the distributed gateway mode and recovery mode, please start the workers first.) Please fill the USER_IP and USER_PORT in conf/docklet.conf, it is the ip and port of user server. By default, it is `localhost` and `9100` You can check the daemon status by running `bin/docklet-master status` The master logs are in **FS_PREFIX/local/log/docklet-master.log** and **docklet-web.log**. ### worker ### Worker needs a basefs image to create containers. 
You can create such an image with `lxc-create -n test -t download`, then copy the rootfs to **FS_PREFIX/local**, and rename `rootfs` to `basefs`. Note the `jupyerhub` package must be installed for this image. And the start script `tools/start_jupyter.sh` should be placed at `basefs/home/jupyter`. You can check and run `tools/update-basefs.sh` to update basefs. Run `bin/docklet-worker start`, will start worker in background. You can check the daemon status by running `bin/docklet-worker status`. The log is in **FS_PREFIX/local/log/docklet-worker.log**. Currently, the worker must be run after the master has been started. ## Usage ## Open a browser, visiting the address specified by PORTAL_URL , e.g., ` http://docklet.info/ ` That is it. # Contribute # Contributions are welcome. Please check [devguide](doc/devguide/devguide.md) ================================================ FILE: VERSION ================================================ 0.4.0 ================================================ FILE: bin/docklet-master ================================================ #!/bin/sh [ $(id -u) != '0' ] && echo "root is needed" && exit 1 # get some path of docklet bindir=${0%/*} # $bindir maybe like /opt/docklet/src/../sbin # use command below to make $bindir in normal absolute path DOCKLET_BIN=$(cd $bindir; pwd) DOCKLET_HOME=${DOCKLET_BIN%/*} DOCKLET_CONF=$DOCKLET_HOME/conf LXC_SCRIPT=$DOCKLET_CONF/lxc-script DOCKLET_SRC=$DOCKLET_HOME/src DOCKLET_LIB=$DOCKLET_SRC DOCKLET_WEB=$DOCKLET_HOME/web DOCKLET_USER=$DOCKLET_HOME/user # default working directory, default to /opt/docklet FS_PREFIX=/opt/docklet #network interface , default is eth0 NETWORK_DEVICE=eth0 #etcd server address, default is localhost:2379 ETCD=localhost:2379 #unique cluster_name, default is docklet-vc CLUSTER_NAME=docklet-vc #web port, default is 8888 WEB_PORT=8888 USER_PORT=9100 #cluster net, default is 172.16.0.1/16 CLUSTER_NET="172.16.0.1/16" # ip addresses range of containers for batch job, default is 
10.16.0.0/16 BATCH_NET="10.16.0.0/16" #configurable-http-proxy public port, default is 8000 PROXY_PORT=8000 #configurable-http-proxy api port, default is 8001 PROXY_API_PORT=8001 DISTRIBUTED_GATEWAY=False . $DOCKLET_CONF/docklet.conf export FS_PREFIX RUN_DIR=$FS_PREFIX/local/run LOG_DIR=$FS_PREFIX/local/log # This next line determines what user the script runs as. DAEMON_USER=root # settings for docklet master DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py DAEMON_NAME_MASTER=docklet-master DAEMON_OPTS_MASTER= # The process ID of the script when it runs is stored here: PIDFILE_MASTER=$RUN_DIR/$DAEMON_NAME_MASTER.pid # settings for docklet web DAEMON_WEB=$DOCKLET_WEB/web.py DAEMON_NAME_WEB=docklet-web PIDFILE_WEB=$RUN_DIR/docklet-web.pid DAEMON_OPTS_WEB= # settings for docklet proxy, which is required for web access DAEMON_PROXY=`which configurable-http-proxy` DAEMON_NAME_PROXY=docklet-proxy PIDFILE_PROXY=$RUN_DIR/proxy.pid DAEMON_OPTS_PROXY= # settings for docklet user DAEMON_USER_MODULE=$DOCKLET_USER/user.py DAEMON_NAME_USER=docklet-user PIDFILE_USER=$RUN_DIR/docklet-user.pid DAEMON_OPTS_USER= RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf export CONFIG=$RUNNING_CONFIG . /lib/lsb/init-functions ########### pre_start_master () { log_daemon_msg "Starting $DAEMON_NAME_MASTER in $FS_PREFIX" [ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global [ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local [ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users [ ! -d $FS_PREFIX/global/sys ] && mkdir -p $FS_PREFIX/global/sys [ ! -d $FS_PREFIX/global/images/private ] && mkdir -p $FS_PREFIX/global/images/private [ ! -d $FS_PREFIX/global/images/public ] && mkdir -p $FS_PREFIX/global/images/public [ ! -d $FS_PREFIX/local/volume ] && mkdir -p $FS_PREFIX/local/volume [ ! -d $FS_PREFIX/local/temp ] && mkdir -p $FS_PREFIX/local/temp [ ! -d $FS_PREFIX/local/run ] && mkdir -p $FS_PREFIX/local/run [ ! 
-d $FS_PREFIX/local/log ] && mkdir -p $FS_PREFIX/local/log grep -P "^[\s]*[a-zA-Z]" $DOCKLET_CONF/docklet.conf > $RUNNING_CONFIG echo "DOCKLET_HOME=$DOCKLET_HOME" >> $RUNNING_CONFIG echo "DOCKLET_BIN=$DOCKLET_BIN" >> $RUNNING_CONFIG echo "DOCKLET_CONF=$DOCKLET_CONF" >> $RUNNING_CONFIG echo "LXC_SCRIPT=$LXC_SCRIPT" >> $RUNNING_CONFIG echo "DOCKLET_SRC=$DOCKLET_SRC" >> $RUNNING_CONFIG echo "DOCKLET_LIB=$DOCKLET_LIB" >> $RUNNING_CONFIG # iptables for NAT network for containers to access web iptables -t nat -F iptables -t nat -A POSTROUTING -s $CLUSTER_NET -j MASQUERADE iptables -t nat -A POSTROUTING -s $BATCH_NET -j MASQUERADE } do_start_master () { DAEMON_OPTS_MASTER=$1 # MODE : start mode # new : clean old data in etcd, global directory and start a new cluster # recovery : start cluster and recover status from etcd and global directory # Default is "recovery" start-stop-daemon --start --oknodo --background --pidfile $PIDFILE_MASTER --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_MASTER -- $DAEMON_OPTS_MASTER log_end_msg $? } pre_start_web () { log_daemon_msg "Starting $DAEMON_NAME_WEB in $FS_PREFIX" webip=$(ip addr show $NETWORK_DEVICE | grep -oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/[0-9]+") [ $? != "0" ] && echo "wrong NETWORK_DEVICE $NETWORK_DEVICE" && exit 1 webip=${webip%/*} AUTH_COOKIE_URL=http://$webip:$WEB_PORT/jupyter #echo "set AUTH_COOKIE_URL:$AUTH_COOKIE_URL in etcd with key:$CLUSTER_NAME/web/authurl" curl -XPUT http://$ETCD/v2/keys/$CLUSTER_NAME/web/authurl -d value="$AUTH_COOKIE_URL" > /dev/null 2>&1 [ $? != 0 ] && echo "set AUTH_COOKIE_URL failed in etcd" && exit 1 } do_start_web () { pre_start_web DAEMON_OPTS_WEB="-p $WEB_PORT" start-stop-daemon --start --background --pidfile $PIDFILE_WEB --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_WEB -- $DAEMON_OPTS_WEB log_end_msg $? 
} do_start_proxy () { if [ "$DISTRIBUTED_GATEWAY" = "True" ] then return 1 fi log_daemon_msg "Starting $DAEMON_NAME_PROXY daemon in $FS_PREFIX" DAEMON_OPTS_PROXY="--port $PROXY_PORT --api-port $PROXY_API_PORT --default-target=http://localhost:8888" start-stop-daemon --start --background --pidfile $PIDFILE_PROXY --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_PROXY -- $DAEMON_OPTS_PROXY log_end_msg $? } do_start_user () { log_daemon_msg "Starting $DAEMON_NAME_USER in $FS_PREFIX" DAEMON_OPTS_USER="-p $USER_PORT" start-stop-daemon --start --background --pidfile $PIDFILE_USER --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_USER_MODULE -- $DAEMON_OPTS_USER log_end_msg $? } do_stop_master () { log_daemon_msg "Stopping $DAEMON_NAME_MASTER daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_MASTER --retry 10 log_end_msg $? } do_stop_web () { log_daemon_msg "Stopping $DAEMON_NAME_WEB daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_WEB --retry 10 log_end_msg $? } do_stop_proxy () { if [ "$DISTRIBUTED_GATEWAY" = "True" ] then return 1 fi log_daemon_msg "Stopping $DAEMON_NAME_PROXY daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_PROXY --retry 10 log_end_msg $? } do_stop_user () { log_daemon_msg "Stopping $DAEMON_NAME_USER daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_USER --retry 10 log_end_msg $? 
} case "$1" in init) pre_start_master do_start_master "new" do_start_proxy do_start_web ;; start) pre_start_master do_start_master "recovery" do_start_proxy do_start_web ;; stop) do_stop_master do_stop_proxy do_stop_web ;; restart) do_stop_master do_stop_proxy do_stop_web pre_start_master do_start_master "recovery" do_start_proxy do_start_web ;; start_proxy) do_start_proxy ;; start_web) do_start_web ;; stop_web) do_stop_web ;; reinit) do_stop_master do_stop_proxy pre_start_master do_start_master "new" do_start_proxy ;; status) status=0 status_of_proc -p $PIDFILE_MASTER "$DAEMON_MASTER" "$DAEMON_NAME_MASTER" || status=$? status_of_proc -p $PIDFILE_PROXY "$DAEMON_PROXY" "$DAEMON_NAME_PROXY" || status=$? exit $status ;; *) echo "Usage: $DAEMON_NAME_MASTER {init|start|stop|restart|reinit|status|start_proxy|stop_proxy|start_web|stop_web}" exit 1 ;; esac exit 0 ================================================ FILE: bin/docklet-supermaster ================================================ #!/bin/sh [ $(id -u) != '0' ] && echo "root is needed" && exit 1 # get some path of docklet bindir=${0%/*} # $bindir maybe like /opt/docklet/src/../sbin # use command below to make $bindir in normal absolute path DOCKLET_BIN=$(cd $bindir; pwd) DOCKLET_HOME=${DOCKLET_BIN%/*} DOCKLET_CONF=$DOCKLET_HOME/conf LXC_SCRIPT=$DOCKLET_CONF/lxc-script DOCKLET_SRC=$DOCKLET_HOME/src DOCKLET_LIB=$DOCKLET_SRC DOCKLET_WEB=$DOCKLET_HOME/web DOCKLET_USER=$DOCKLET_HOME/user # default working directory, default to /opt/docklet FS_PREFIX=/opt/docklet #configurable-http-proxy public port, default is 8000 PROXY_PORT=8000 #configurable-http-proxy api port, default is 8001 PROXY_API_PORT=8001 #network interface , default is eth0 NETWORK_DEVICE=eth0 #etcd server address, default is localhost:2379 ETCD=localhost:2379 #unique cluster_name, default is docklet-vc CLUSTER_NAME=docklet-vc #web port, default is 8888 WEB_PORT=8888 USER_PORT=9100 #cluster net, default is 172.16.0.1/16 CLUSTER_NET="172.16.0.1/16" # ip 
addresses range of containers for batch job, default is 10.16.0.0/16 BATCH_NET="10.16.0.0/16" . $DOCKLET_CONF/docklet.conf export FS_PREFIX RUN_DIR=$FS_PREFIX/local/run LOG_DIR=$FS_PREFIX/local/log # This next line determines what user the script runs as. DAEMON_USER=root # settings for docklet master DAEMON_MASTER=$DOCKLET_LIB/master/httprest.py DAEMON_NAME_MASTER=docklet-master DAEMON_OPTS_MASTER= # The process ID of the script when it runs is stored here: PIDFILE_MASTER=$RUN_DIR/$DAEMON_NAME_MASTER.pid # settings for docklet proxy, which is required for web access DAEMON_PROXY=`which configurable-http-proxy` DAEMON_NAME_PROXY=docklet-proxy PIDFILE_PROXY=$RUN_DIR/proxy.pid DAEMON_OPTS_PROXY= # settings for docklet web DAEMON_WEB=$DOCKLET_WEB/web.py DAEMON_NAME_WEB=docklet-web PIDFILE_WEB=$RUN_DIR/docklet-web.pid DAEMON_OPTS_WEB= # settings for docklet user DAEMON_USER_MODULE=$DOCKLET_USER/user.py DAEMON_NAME_USER=docklet-user PIDFILE_USER=$RUN_DIR/docklet-user.pid DAEMON_OPTS_USER= RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf export CONFIG=$RUNNING_CONFIG . /lib/lsb/init-functions ########### pre_start_master () { log_daemon_msg "Starting $DAEMON_NAME_MASTER in $FS_PREFIX" [ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global [ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local [ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users [ ! -d $FS_PREFIX/global/sys ] && mkdir -p $FS_PREFIX/global/sys [ ! -d $FS_PREFIX/global/images/private ] && mkdir -p $FS_PREFIX/global/images/private [ ! -d $FS_PREFIX/global/images/public ] && mkdir -p $FS_PREFIX/global/images/public [ ! -d $FS_PREFIX/local/volume ] && mkdir -p $FS_PREFIX/local/volume [ ! -d $FS_PREFIX/local/temp ] && mkdir -p $FS_PREFIX/local/temp [ ! -d $FS_PREFIX/local/run ] && mkdir -p $FS_PREFIX/local/run [ ! 
-d $FS_PREFIX/local/log ] && mkdir -p $FS_PREFIX/local/log grep -P "^[\s]*[a-zA-Z]" $DOCKLET_CONF/docklet.conf > $RUNNING_CONFIG echo "DOCKLET_HOME=$DOCKLET_HOME" >> $RUNNING_CONFIG echo "DOCKLET_BIN=$DOCKLET_BIN" >> $RUNNING_CONFIG echo "DOCKLET_CONF=$DOCKLET_CONF" >> $RUNNING_CONFIG echo "LXC_SCRIPT=$LXC_SCRIPT" >> $RUNNING_CONFIG echo "DOCKLET_SRC=$DOCKLET_SRC" >> $RUNNING_CONFIG echo "DOCKLET_LIB=$DOCKLET_LIB" >> $RUNNING_CONFIG # iptables for NAT network for containers to access web iptables -t nat -F iptables -t nat -A POSTROUTING -s $CLUSTER_NET -j MASQUERADE iptables -t nat -A POSTROUTING -s $BATCH_NET -j MASQUERADE } do_start_master () { DAEMON_OPTS_MASTER=$1 # MODE : start mode # new : clean old data in etcd, global directory and start a new cluster # recovery : start cluster and recover status from etcd and global directory # Default is "recovery" $DOCKLET_HOME/tools/nginx_config.sh start-stop-daemon --start --oknodo --background --pidfile $PIDFILE_MASTER --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_MASTER -- $DAEMON_OPTS_MASTER log_end_msg $? } do_start_proxy () { if [ "$DISTRIBUTED_GATEWAY" = "True" ] then return 1 fi log_daemon_msg "Starting $DAEMON_NAME_PROXY daemon in $FS_PREFIX" DAEMON_OPTS_PROXY="--port $PROXY_PORT --api-port $PROXY_API_PORT --default-target=http://localhost:8888" start-stop-daemon --start --background --pidfile $PIDFILE_PROXY --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_PROXY -- $DAEMON_OPTS_PROXY log_end_msg $? } pre_start_web () { log_daemon_msg "Starting $DAEMON_NAME_WEB in $FS_PREFIX" webip=$(ip addr show $NETWORK_DEVICE | grep -oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/[0-9]+") [ $? 
!= "0" ] && echo "wrong NETWORK_DEVICE $NETWORK_DEVICE" && exit 1 webip=${webip%/*} AUTH_COOKIE_URL=http://$webip:$WEB_PORT/jupyter #echo "set AUTH_COOKIE_URL:$AUTH_COOKIE_URL in etcd with key:$CLUSTER_NAME/web/authurl" curl -XPUT http://$ETCD/v2/keys/$CLUSTER_NAME/web/authurl -d value="$AUTH_COOKIE_URL" > /dev/null 2>&1 [ $? != 0 ] && echo "set AUTH_COOKIE_URL failed in etcd" && exit 1 } do_start_web () { pre_start_web DAEMON_OPTS_WEB="-p $WEB_PORT" start-stop-daemon --start --background --pidfile $PIDFILE_WEB --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_WEB -- $DAEMON_OPTS_WEB log_end_msg $? } do_start_user () { log_daemon_msg "Starting $DAEMON_NAME_USER in $FS_PREFIX" DAEMON_OPTS_USER="-p $USER_PORT" start-stop-daemon --start --background --pidfile $PIDFILE_USER --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_USER_MODULE -- $DAEMON_OPTS_USER log_end_msg $? } do_stop_master () { log_daemon_msg "Stopping $DAEMON_NAME_MASTER daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_MASTER --retry 10 log_end_msg $? } do_stop_proxy () { if [ "$DISTRIBUTED_GATEWAY" = "True" ] then return 1 fi log_daemon_msg "Stopping $DAEMON_NAME_PROXY daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_PROXY --retry 10 log_end_msg $? } do_stop_web () { log_daemon_msg "Stopping $DAEMON_NAME_WEB daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_WEB --retry 10 log_end_msg $? } do_stop_user () { log_daemon_msg "Stopping $DAEMON_NAME_USER daemon" start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_USER --retry 10 log_end_msg $? 
}

# Command dispatcher for the docklet master init script.
# init/reinit start the stack with a fresh master state ("new");
# start/restart recover the previous state ("recovery").
case "$1" in
  init)
    pre_start_master
    do_start_user
    do_start_proxy
    do_start_web
    do_start_master "new"
    ;;
  start)
    pre_start_master
    do_start_user
    do_start_proxy
    do_start_web
    do_start_master "recovery"
    ;;
  stop)
    do_stop_web
    do_stop_proxy
    do_stop_master
    do_stop_user
    ;;
  restart)
    do_stop_user
    do_stop_web
    do_stop_proxy
    do_stop_master
    pre_start_master
    do_start_user
    do_start_proxy
    do_start_web
    do_start_master "recovery"
    ;;
  start_proxy)
    do_start_proxy
    ;;
  stop_proxy)
    do_stop_proxy
    ;;
  start_web)
    do_start_web
    ;;
  stop_web)
    do_stop_web
    ;;
  start_user)
    do_start_user
    ;;
  stop_user)
    do_stop_user
    ;;
  reinit)
    do_stop_web
    do_stop_proxy
    do_stop_master
    do_stop_user
    pre_start_master
    do_start_user
    do_start_proxy
    do_start_web
    do_start_master "new"
    ;;
  status)
    # Report on every component and accumulate a non-zero exit status
    # if any of them is down.
    status=0
    status_of_proc -p $PIDFILE_MASTER "$DAEMON_MASTER" "$DAEMON_NAME_MASTER" || status=$?
    status_of_proc -p $PIDFILE_PROXY "$DAEMON_PROXY" "$DAEMON_NAME_PROXY" || status=$?
    status_of_proc -p $PIDFILE_WEB "$DAEMON_WEB" "$DAEMON_NAME_WEB" || status=$?
    # FIX: previously passed "$DAEMON_USER" (the run-as unix user, root),
    # not the user-server daemon started by do_start_user.
    status_of_proc -p $PIDFILE_USER "$DAEMON_USER_MODULE" "$DAEMON_NAME_USER" || status=$?
    exit $status
    ;;
  *)
    # FIX: usage previously omitted the supported start_user/stop_user commands.
    echo "Usage: $DAEMON_NAME_MASTER {init|start|stop|restart|reinit|status|start_proxy|stop_proxy|start_web|stop_web|start_user|stop_user}"
    exit 1
    ;;
esac
exit 0

================================================
FILE: bin/docklet-worker
================================================
#!/bin/sh
# docklet worker init script: must run as root.
[ $(id -u) != '0' ] && echo "root is needed" && exit 1

# get some path of docklet
bindir=${0%/*}
# $bindir maybe like /opt/docklet/src/../bin
# use command below to make $bindir in normal absolute path
DOCKLET_BIN=$(cd $bindir; pwd)
DOCKLET_HOME=${DOCKLET_BIN%/*}
DOCKLET_CONF=$DOCKLET_HOME/conf
LXC_SCRIPT=$DOCKLET_CONF/lxc-script
DOCKLET_SRC=$DOCKLET_HOME/src
DOCKLET_LIB=$DOCKLET_SRC
DOCKLET_WEB=$DOCKLET_HOME/web

# working directory, default to /opt/docklet
FS_PREFIX=/opt/docklet

# cluster net ip range, default is 172.16.0.1/16
CLUSTER_NET="172.16.0.1/16"

# ip addresses range of containers for batch job, default is 10.16.0.0/16
BATCH_NET="10.16.0.0/16"

# configurable-http-proxy public port, default is 8000
PROXY_PORT=8000

# configurable-http-proxy api port, default is 8001
PROXY_API_PORT=8001

DISTRIBUTED_GATEWAY=False

# site configuration may override any of the defaults above
. $DOCKLET_CONF/docklet.conf

export FS_PREFIX

RUN_DIR=$FS_PREFIX/local/run
LOG_DIR=$FS_PREFIX/local/log

# This next line determines what user the script runs as.
DAEMON_USER=root

# settings for docklet worker
DAEMON=$DOCKLET_LIB/worker/worker.py
DAEMON_NAME=docklet-worker
DAEMON_OPTS=
# The process ID of the script when it runs is stored here:
PIDFILE=$RUN_DIR/$DAEMON_NAME.pid

# settings for docklet batch worker, which is required for batch job processing system
BATCH_ON=True
DAEMON_BATCH=$DOCKLET_LIB/worker/taskworker.py
DAEMON_NAME_BATCH=docklet-taskworker
PIDFILE_BATCH=$RUN_DIR/batch.pid
DAEMON_OPTS_BATCH=

# settings for docklet proxy, which is required for web access
DAEMON_PROXY=`which configurable-http-proxy`
DAEMON_NAME_PROXY=docklet-proxy
PIDFILE_PROXY=$RUN_DIR/proxy.pid
DAEMON_OPTS_PROXY=

DOCKMETER_NAME=$DAEMON_NAME-metering
DOCKMETER_PIDFILE=$RUN_DIR/$DOCKMETER_NAME.pid

. /lib/lsb/init-functions

###########

# Pick the container config matching the installed lxc major version.
# Only 2 and 3 are supported; anything else falls back to 2.
update_container_conf () {
    LXC_VERSION=$(lxc-start --version | awk -F "." '{print $1}')
    if [ "$LXC_VERSION"x != "2"x ] && [ "$LXC_VERSION"x != "3"x ]; then
        LXC_VERSION=2
    fi
    cp $DOCKLET_CONF/container/lxc$LXC_VERSION.container.conf $DOCKLET_CONF/container.conf
    cp $DOCKLET_CONF/container/lxc$LXC_VERSION.container.batch.conf $DOCKLET_CONF/container.batch.conf
}

# Prepare working directories, the effective running config, NAT rules
# and the container conf before any worker daemon is started.
pre_start () {
    [ ! -d $FS_PREFIX/global ] && mkdir -p $FS_PREFIX/global
    [ ! -d $FS_PREFIX/local ] && mkdir -p $FS_PREFIX/local
    [ ! -d $FS_PREFIX/global/users ] && mkdir -p $FS_PREFIX/global/users
    [ ! -d $FS_PREFIX/local/volume ] && mkdir -p $FS_PREFIX/local/volume
    [ ! -d $FS_PREFIX/local/temp ] && mkdir -p $FS_PREFIX/local/temp
    [ ! -d $FS_PREFIX/local/run ] && mkdir -p $FS_PREFIX/local/run
    [ ! -d $FS_PREFIX/local/log ] && mkdir -p $FS_PREFIX/local/log
    tempdir=/opt/docklet/local/temp  # NOTE(review): apparently unused here; kept in case sourced scripts read it

    # materialize the effective config (uncommented lines only) plus the
    # derived docklet paths, for the python daemons to read via $CONFIG
    RUNNING_CONFIG=$FS_PREFIX/local/docklet-running.conf
    grep -P "^[\s]*[a-zA-Z]" $DOCKLET_CONF/docklet.conf > $RUNNING_CONFIG
    echo "DOCKLET_HOME=$DOCKLET_HOME" >> $RUNNING_CONFIG
    echo "DOCKLET_BIN=$DOCKLET_BIN" >> $RUNNING_CONFIG
    echo "DOCKLET_CONF=$DOCKLET_CONF" >> $RUNNING_CONFIG
    echo "LXC_SCRIPT=$LXC_SCRIPT" >> $RUNNING_CONFIG
    echo "DOCKLET_SRC=$DOCKLET_SRC" >> $RUNNING_CONFIG
    echo "DOCKLET_LIB=$DOCKLET_LIB" >> $RUNNING_CONFIG
    export CONFIG=$RUNNING_CONFIG

    # iptables for NAT network for containers to access web
    iptables -t nat -F
    iptables -t nat -A POSTROUTING -s $CLUSTER_NET -j MASQUERADE
    iptables -t nat -A POSTROUTING -s $BATCH_NET -j MASQUERADE

    if [ ! -d $FS_PREFIX/local/basefs ]; then
        log_daemon_msg "basefs does not exist, run prepare.sh first" && exit 1
    fi
    if [ ! -d $FS_PREFIX/local/packagefs ]; then
        mkdir -p $FS_PREFIX/local/packagefs
    fi
    update_container_conf
}

# Start the main worker daemon; $1 selects the worker mode
# ("normal-worker" or "batch-worker"), passed through as daemon options.
do_start() {
    pre_start
    DAEMON_OPTS=$1
    log_daemon_msg "Starting $DAEMON_NAME in $FS_PREFIX"
    start-stop-daemon --start --oknodo --background --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS
    log_end_msg $?
}

do_start_batch () {
    # the batch task worker is optional; skip when disabled in config
    if [ "$BATCH_ON" = "False" ]; then
        return 1
    fi
    log_daemon_msg "Starting $DAEMON_NAME_BATCH in $FS_PREFIX"
    DAEMON_OPTS_BATCH=""
    start-stop-daemon --start --background --pidfile $PIDFILE_BATCH --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_BATCH -- $DAEMON_OPTS_BATCH
    log_end_msg $?
}

do_start_proxy () {
    # a per-worker proxy only runs with distributed gateways
    if [ "$DISTRIBUTED_GATEWAY" = "False" ]; then
        return 1
    fi
    log_daemon_msg "Starting $DAEMON_NAME_PROXY daemon in $FS_PREFIX"
    DAEMON_OPTS_PROXY="--port $PROXY_PORT --api-port $PROXY_API_PORT --default-target=http://localhost:8888"
    start-stop-daemon --start --background --pidfile $PIDFILE_PROXY --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON_PROXY -- $DAEMON_OPTS_PROXY
    log_end_msg $?
}

do_stop () {
    log_daemon_msg "Stopping $DAEMON_NAME daemon"
    start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE --retry 10
    log_end_msg $?
}

do_stop_batch () {
    if [ "$BATCH_ON" = "False" ]; then
        return 1
    fi
    log_daemon_msg "Stopping $DAEMON_NAME_BATCH daemon"
    start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_BATCH --retry 10
    log_end_msg $?
}

do_stop_proxy () {
    if [ "$DISTRIBUTED_GATEWAY" = "False" ]; then
        return 1
    fi
    log_daemon_msg "Stopping $DAEMON_NAME_PROXY daemon"
    start-stop-daemon --stop --quiet --oknodo --remove-pidfile --pidfile $PIDFILE_PROXY --retry 10
    log_end_msg $?
}

do_start_meter() {
    log_daemon_msg "Starting $DOCKMETER_NAME in $FS_PREFIX"
    start-stop-daemon --start --background --pidfile $DOCKMETER_PIDFILE --make-pidfile --exec $DOCKLET_HOME/meter/main.py
    log_end_msg $?
}

do_stop_meter() {
    log_daemon_msg "Stopping $DOCKMETER_NAME daemon"
    start-stop-daemon --stop --pidfile $DOCKMETER_PIDFILE --remove-pidfile
    log_end_msg $?
}

case "$1" in
  start)
    do_start "normal-worker"
    do_start_batch
    do_start_proxy
    ;;
  stop)
    do_stop
    do_stop_batch
    do_stop_proxy
    ;;
  start-meter)
    do_start_meter
    ;;
  stop-meter)
    do_stop_meter
    ;;
  start_batch)
    do_start "batch-worker"
    do_start_batch
    ;;
  stop_batch)
    do_stop
    do_stop_batch
    ;;
  start_proxy)
    do_start_proxy
    ;;
  stop_proxy)
    do_stop_proxy
    ;;
  console)
    pre_start
    cprofilev $DAEMON $DAEMON_OPTS
    ;;
  restart)
    do_stop
    do_stop_batch
    do_stop_proxy
    do_start "normal-worker"
    do_start_batch
    do_start_proxy
    ;;
  status)
    # FIX: the first check used "&& exit 0 || exit $?", which always exited
    # before the batch and proxy checks could run (and $status was never set).
    # Accumulate the status of all components, like the master script does.
    status=0
    status_of_proc -p $PIDFILE "$DAEMON" "$DAEMON_NAME" || status=$?
    status_of_proc -p $PIDFILE_BATCH "$DAEMON_BATCH" "$DAEMON_NAME_BATCH" || status=$?
    status_of_proc -p $PIDFILE_PROXY "$DAEMON_PROXY" "$DAEMON_NAME_PROXY" || status=$?
    exit $status
    ;;
  *)
    # FIX: usage previously omitted several supported commands.
    echo "Usage: $DAEMON_NAME {start|stop|restart|status|start_batch|stop_batch|start_proxy|stop_proxy|start-meter|stop-meter|console}"
    exit 1
    ;;
esac
exit 0

================================================
FILE: cloudsdk-installer.sh
================================================
#!/bin/bash
# Install the Aliyun cloud SDKs needed for docklet's scale-out support.
if [[ "`whoami`" != "root" ]]; then
    # FIX: typo "previledge" -> "privilege" in the error message
    echo "FAILED: Require root privilege !" > /dev/stderr
    exit 1
fi

pip3 install aliyun-python-sdk-core-v3
pip3 install aliyun-python-sdk-ecs
exit 0

================================================
FILE: conf/container/lxc2.container.batch.conf
================================================
# This is the common container.conf for all containers.
# If want set custom settings, you have two choices:
# 1. Directly modify this file, which is not recommend, because the
#    setting will be overriden when new version container.conf released.
# 2. Use a custom config file in this conf directory: lxc.custom.conf,
#    it uses the same grammar as container.conf, and will be merged
#    with the default container.conf by docklet at runtime.
# # The following is an example mounting user html directory # lxc.mount.entry = /public/home/%USERNAME%/public_html %ROOTFS%/root/public_html none bind,rw,create=dir 0 0 # #### include /usr/share/lxc/config/ubuntu.common.conf lxc.include = /usr/share/lxc/config/ubuntu.common.conf ############## DOCKLET CONFIG ############## # Setup 0 tty devices lxc.tty = 0 lxc.rootfs = %ROOTFS% lxc.utsname = %HOSTNAME% lxc.network.type = veth lxc.network.name = eth0 # veth.pair is limited in 16 bytes lxc.network.veth.pair = %VETHPAIR% lxc.network.script.up = %LXCSCRIPT%/lxc-ifup lxc.network.script.down = %LXCSCRIPT%/lxc-ifdown lxc.network.ipv4 = %IP% lxc.network.ipv4.gateway = %GATEWAY% lxc.network.flags = up lxc.network.mtu = 1420 lxc.cgroup.pids.max = 2048 lxc.cgroup.memory.limit_in_bytes = %CONTAINER_MEMORY%M #lxc.cgroup.memory.kmem.limit_in_bytes = 512M #lxc.cgroup.memory.soft_limit_in_bytes = 4294967296 #lxc.cgroup.memory.memsw.limit_in_bytes = 8589934592 # lxc.cgroup.cpu.cfs_period_us : period time of cpu, default 100000, means 100ms # lxc.cgroup.cpu.cfs_quota_us : quota time of this process lxc.cgroup.cpu.cfs_quota_us = %CONTAINER_CPU% lxc.cap.drop = sys_admin net_admin mac_admin mac_override sys_time sys_module lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/data %ROOTFS%/root/nfs none bind,rw,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/hosts/batch-%TASKID%.hosts %ROOTFS%/etc/hosts none bind,ro,create=file 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/ssh %ROOTFS%/root/.ssh none bind,ro,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/local/temp/%LXCNAME%/ %ROOTFS%/tmp none bind,rw,create=dir 0 0 # setting hostname lxc.hook.pre-start = %LXCSCRIPT%/lxc-prestart # setting nfs softlink #lxc.hook.mount = %LXCSCRIPT%/lxc-mount ================================================ FILE: conf/container/lxc2.container.conf ================================================ # This is the common container.conf for all containers. 
# If want set custom settings, you have two choices: # 1. Directly modify this file, which is not recommend, because the # setting will be overriden when new version container.conf released. # 2. Use a custom config file in this conf directory: lxc.custom.conf, # it uses the same grammer as container.conf, and will be merged # with the default container.conf by docklet at runtime. # # The following is an example mounting user html directory # lxc.mount.entry = /public/home/%USERNAME%/public_html %ROOTFS%/root/public_html none bind,rw,create=dir 0 0 # #### include /usr/share/lxc/config/ubuntu.common.conf lxc.include = /usr/share/lxc/config/ubuntu.common.conf ############## DOCKLET CONFIG ############## # Setup 0 tty devices lxc.tty = 0 lxc.rootfs = %ROOTFS% lxc.utsname = %HOSTNAME% lxc.network.type = veth lxc.network.name = eth0 # veth.pair is limited in 16 bytes lxc.network.veth.pair = %VETHPAIR% lxc.network.script.up = %LXCSCRIPT%/lxc-ifup lxc.network.script.down = %LXCSCRIPT%/lxc-ifdown lxc.network.ipv4 = %IP% lxc.network.ipv4.gateway = %GATEWAY% lxc.network.flags = up lxc.network.mtu = 1420 lxc.cgroup.pids.max = 2048 lxc.cgroup.memory.limit_in_bytes = %CONTAINER_MEMORY%M #lxc.cgroup.memory.kmem.limit_in_bytes = 512M #lxc.cgroup.memory.soft_limit_in_bytes = 4294967296 #lxc.cgroup.memory.memsw.limit_in_bytes = 8589934592 # lxc.cgroup.cpu.cfs_period_us : period time of cpu, default 100000, means 100ms # lxc.cgroup.cpu.cfs_quota_us : quota time of this process lxc.cgroup.cpu.cfs_quota_us = %CONTAINER_CPU% lxc.cap.drop = sys_admin net_admin mac_admin mac_override sys_time sys_module lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/data %ROOTFS%/root/nfs none bind,rw,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/hosts/%CLUSTERID%.hosts %ROOTFS%/etc/hosts none bind,ro,create=file 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/ssh %ROOTFS%/root/.ssh none bind,ro,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/local/temp/%LXCNAME%/ 
%ROOTFS%/tmp none bind,rw,create=dir 0 0 # setting hostname lxc.hook.pre-start = %LXCSCRIPT%/lxc-prestart # setting nfs softlink #lxc.hook.mount = %LXCSCRIPT%/lxc-mount ================================================ FILE: conf/container/lxc3.container.batch.conf ================================================ # This is the common container.conf for all containers. # If want set custom settings, you have two choices: # 1. Directly modify this file, which is not recommend, because the # setting will be overriden when new version container.conf released. # 2. Use a custom config file in this conf directory: lxc.custom.conf, # it uses the same grammer as container.conf, and will be merged # with the default container.conf by docklet at runtime. # # The following is an example mounting user html directory # lxc.mount.entry = /public/home/%USERNAME%/public_html %ROOTFS%/root/public_html none bind,rw,create=dir 0 0 # #### include /usr/share/lxc/config/ubuntu.common.conf lxc.include = /usr/share/lxc/config/ubuntu.common.conf ############## DOCKLET CONFIG ############## # Setup 0 tty devices lxc.tty.max = 0 lxc.rootfs.path = %ROOTFS% lxc.uts.name = %HOSTNAME% lxc.net.0.type = veth lxc.net.0.name = eth0 # veth.pair is limited in 16 bytes lxc.net.0.veth.pair = %VETHPAIR% lxc.net.0.script.up = %LXCSCRIPT%/lxc-ifup lxc.net.0.script.down = %LXCSCRIPT%/lxc-ifdown lxc.net.0.ipv4.address = %IP% lxc.net.0.ipv4.gateway = %GATEWAY% lxc.net.0.flags = up lxc.net.0.mtu = 1420 lxc.cgroup.pids.max = 2048 lxc.cgroup.memory.limit_in_bytes = %CONTAINER_MEMORY%M #lxc.cgroup.memory.kmem.limit_in_bytes = 512M #lxc.cgroup.memory.soft_limit_in_bytes = 4294967296 #lxc.cgroup.memory.memsw.limit_in_bytes = 8589934592 # lxc.cgroup.cpu.cfs_period_us : period time of cpu, default 100000, means 100ms # lxc.cgroup.cpu.cfs_quota_us : quota time of this process lxc.cgroup.cpu.cfs_quota_us = %CONTAINER_CPU% lxc.cap.drop = sys_admin net_admin mac_admin mac_override sys_time sys_module lxc.mount.entry = 
%FS_PREFIX%/global/users/%USERNAME%/data %ROOTFS%/root/nfs none bind,rw,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/hosts/batch-%TASKID%.hosts %ROOTFS%/etc/hosts none bind,ro,create=file 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/ssh %ROOTFS%/root/.ssh none bind,ro,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/local/temp/%LXCNAME%/ %ROOTFS%/tmp none bind,rw,create=dir 0 0 # setting hostname lxc.hook.pre-start = %LXCSCRIPT%/lxc-prestart # setting nfs softlink #lxc.hook.mount = %LXCSCRIPT%/lxc-mount ================================================ FILE: conf/container/lxc3.container.conf ================================================ # This is the common container.conf for all containers. # If want set custom settings, you have two choices: # 1. Directly modify this file, which is not recommend, because the # setting will be overriden when new version container.conf released. # 2. Use a custom config file in this conf directory: lxc.custom.conf, # it uses the same grammer as container.conf, and will be merged # with the default container.conf by docklet at runtime. 
# # The following is an example mounting user html directory # lxc.mount.entry = /public/home/%USERNAME%/public_html %ROOTFS%/root/public_html none bind,rw,create=dir 0 0 # #### include /usr/share/lxc/config/ubuntu.common.conf lxc.include = /usr/share/lxc/config/ubuntu.common.conf ############## DOCKLET CONFIG ############## # Setup 0 tty devices lxc.tty.max = 0 lxc.rootfs.path = %ROOTFS% lxc.uts.name = %HOSTNAME% lxc.net.0.type = veth lxc.net.0.name = eth0 # veth.pair is limited in 16 bytes lxc.net.0.veth.pair = %VETHPAIR% lxc.net.0.script.up = %LXCSCRIPT%/lxc-ifup lxc.net.0.script.down = %LXCSCRIPT%/lxc-ifdown lxc.net.0.ipv4.address = %IP% lxc.net.0.ipv4.gateway = %GATEWAY% lxc.net.0.flags = up lxc.net.0.mtu = 1420 lxc.cgroup.pids.max = 2048 lxc.cgroup.memory.limit_in_bytes = %CONTAINER_MEMORY%M #lxc.cgroup.memory.kmem.limit_in_bytes = 512M #lxc.cgroup.memory.soft_limit_in_bytes = 4294967296 #lxc.cgroup.memory.memsw.limit_in_bytes = 8589934592 # lxc.cgroup.cpu.cfs_period_us : period time of cpu, default 100000, means 100ms # lxc.cgroup.cpu.cfs_quota_us : quota time of this process lxc.cgroup.cpu.cfs_quota_us = %CONTAINER_CPU% lxc.cap.drop = sys_admin net_admin mac_admin mac_override sys_time sys_module lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/data %ROOTFS%/root/nfs none bind,rw,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/hosts/%CLUSTERID%.hosts %ROOTFS%/etc/hosts none bind,ro,create=file 0 0 lxc.mount.entry = %FS_PREFIX%/global/users/%USERNAME%/ssh %ROOTFS%/root/.ssh none bind,ro,create=dir 0 0 lxc.mount.entry = %FS_PREFIX%/local/temp/%LXCNAME%/ %ROOTFS%/tmp none bind,rw,create=dir 0 0 # setting hostname lxc.hook.pre-start = %LXCSCRIPT%/lxc-prestart # setting nfs softlink #lxc.hook.mount = %LXCSCRIPT%/lxc-mount ================================================ FILE: conf/docklet.conf.template ================================================ # ================================================== # # [Local config example] # # 
================================================== # CLUSTER_NAME: name of host cluster, every host cluster should have # a unique name, default is docklet-vc # CLUSTER_NAME=docklet-vc # FS_PREFIX: path to store global and local data for docklet # default is /opt/docklet. # # Note: $FS_PREFIX/global is for storing persistent data, e.g., # custom container images, user data, etc. For a multi hosts # environement, it is the mountpoint of the distributed filesystem # that all physical hosts (master and slave) share. # E.g., for a system with three hosts: computing hosts A and B, # strorage host C. Host C exports its stroage filesystem through nfs # as C:/data, then host A and B should mount C:/data to $FS_PREFIX/global. # Please make sure that the mount is OK before launching docklet. # # FS_PREFIX=/opt/docklet # STORAGE: local storage type, file or disk, default is file # note lvm is required for either case # # file : a large file simulating raw disk storing container runtime # data, located in FS_PREFIX/local, for single machine testing purpose. # # disk : raw disk for storing container files, for production purpose. # If using disk, a partition must be allocated to docklet # - a disk device name must be specified by DISK , e.g, /dev/sdc9 # - this device must be formatted as Linux-LVM, and initialized # as a physical volume (pvcreate /dev/sdc9) in advance. # TAKE CARE to ensure the disk is OK before launching docklet. # # STORAGE=file # # DISK: disk device name if STORAGE is disk # DISK=/dev/sdc9 # CLUSTER_SIZE: virtual cluster size, default is 1 # CLUSTER_SIZE=1 # CLUSTER_NET: cluster network ip address range, default is 172.16.0.1/16 # CLUSTER_NET=172.16.0.1/16 # Deprecated since v0.2.7. read from quota group set in web admin page # CONTAINER_CPU: CPU quota of container, default is 100000 # A single CPU core has total=100000 (100ms), so the default 100000 # mean a single container can occupy a whole core. 
# For a CPU with two cores, this can be set to 200000 # CONTAINER_CPU=100000 # Deprecated since v0.2.7. read from quota group set in web admin page # CONTAINER_DISK: disk quota of container image upper layer, count in MB, # default is 1000 # CONTAINER_DISK=1000 # Deprecated since v0.2.7. read from quota group set in web admin page # CONTAINER_MEMORY: memory quota of container, count in MB, default is 1000 # CONTAINER_MEMORY=1000 # DISKPOOL_SIZE: lvm group size, count in MB, default is 10000 # Only valid with STORAGE=file # DISKPOOL_SIZE=10000 # ETCD: etcd address, default is localhost:2379 # For a muti hosts environment, the administrator should configure how # etcd cluster work together # ETCD=localhost:2379 # NETWORK_DEVICE: specify the network interface docklet uses, # Default is eth0 # NETWORK_DEVICE=eth0 # PORTAL_URL: the public docklet portal url. for a production system, # it should be a valid URL, like http://docklet.info # default is MASTER_IP:NGINX_PORT # PORTAL_URL=http://localhost:8080 # MASTER_IP: master listen ip, default listens on all interfaces # MASTER_IP=0.0.0.0 # MASTER_PORT: master listen port, default is 9000 # MASTER_PORT=9000 # WORKER_PORT: worker listen port, default is 9001 # WORKER_PORT=9001 # NGINX_PORT: the access port of the public portal, default is 8080 # This is the listening port of nginx server. The nginx server forwards # requests according to the requests' urls. If the urls are to workspaces, # it will forward requests to the configurable-http-proxy, otherwise, # to the docklet web. Usually 80 is recommded for production environment # NGINX_PORT=8080 # PROXY_PORT: the listening port of configurable-http-proxy, default is 8000 # it proxy connections from exteral public network to internal private # container networks. 
# PROXY_PORT=8000 # PROXY_API_PORT: configurable-http-proxy api port, default is 8001 # Admins can query the proxy table by calling: # curl http://localhost:8001/api/routes # PROXY_API_PORT=8001 # WEB_PORT: docklet web listening port, default is 8888 # Note: docklet web server is located behind the docklet proxy. # Users access docklet first through proxy, then docklet web server. # Therefore, it is not for user direct access. In most cases, # admins need not to change the default value. # WEB_PORT=8888 # LOG_LEVEL: logging level, of DEBUG, INFO, WARNING, ERROR, CRITICAL # default is DEBUG # LOG_LEVEL=DEBUG # LOG_LIFE: how many days the logs will be kept, default is 10 # LOG_LIFE=10 # WEB_LOG_LEVEL: logging level, of DEBUG, INFO, WARNING, ERROR, CRITICAL # default is DEBUG # WEB_LOG_LEVEL=DEBUG # EXTERNAL_LOGIN: whether docklet will use external account to log in # True or False, default is False # default: authenticate local and PAM users # EXTERNAL_LOGIN=False # DATA_QUOTA : whether enable the quota of data volume or not # True or False, default: False # DATA_QUOTA=False # DATA_QUOTA_CMD : the cmd to set the quota of a given directory. It accepts two arguments: # arg1: the directory name, relative path from the data volume root, e.g, "/users/bob/data" # arg2: the quota value in GB of string, e.g., "100" # default: "gluster volume quota docklet-volume limit-usage %s %s" # DATA_QUOTA_CMD="gluster volume quota docklet-volume limit-usage %s %s" # DISTRIBUTED_GATEWAY : whether the users' gateways are distributed or not # Must be set by same value on master and workers. # True or False, default: False # DISTRIBUTED_GATEWAY=False # PUBLIC_IP : publick ip of this machine. If DISTRIBUTED_GATEWAY is True, # users' gateways can be setup on this machine. Users can visit this machine # by the public ip. default: IP of NETWORK_DEVICE. 
# PUBLIC_IP=0.0.0.0

# NGINX_CONF: the config path of nginx, default: /etc/nginx
# NGINX_CONF=/etc/nginx

# MASTER_IPS: all master ips in a center, separated by ','.
# e.g:192.168.192.191@master1,192.168.192.192@master2
# you can also add a description to each master.
# e.g:master1_desc="this is master1"
# default:0.0.0.0@docklet
# MASTER_IPS=0.0.0.0@docklet

# USER_IP: user listen ip
# default:0.0.0.0
# USER_IP=0.0.0.0

# USER_PORT: user listen port
# default:9100
# USER_PORT=9100

# AUTH_KEY: the key to request the users server from the master,
# or to request the master from the users server. Please set the
# same value on each machine. Please don't use the default value.
# AUTH_KEY=docklet

# ALLOCATED_PORTS: the ports on this host that will be allocated to users.
# The allocated ports are for port mapping. Default: 10000-65535
# The two ports next to '-' are included. If there are several ranges,
# please separate them by ',' , for example: 10000-20000,30000-40000
# ALLOCATED_PORTS=10000-65535

# ALLOW_SCALE_OUT: allow docklet to rent servers on the cloud to scale out
# Only when you deploy docklet on the cloud can you set it to True
# ALLOW_SCALE_OUT=False

# WARNING_DAYS: a user will receive a warning email about releasing
# when his/her vcluster has been stopped for more than this many days.
# Default: 7
# WARNING_DAYS=7

# RELEASE_DAYS: the vcluster will be released when it has been
# stopped for more than this many days. Needs to be larger than WARNING_DAYS.
# Default: 14
# RELEASE_DAYS=14

# ==================================================
#
# Batch Config
#
# ==================================================

# BATCH_ON: whether to start the batch job processing system when starting
# the docklet. Default: True
# BATCH_ON=True

# BATCH_MASTER_PORT: the rpc server port on master.
# default: 50050
# BATCH_MASTER_PORT=50050

# BATCH_WORKER_PORT: the rpc server port on worker.
# default: 50051 # BATCH_WORKER_PORT=50051 # BATCH_NET: ip addresses range of containers for batch job, default is 10.16.0.0/16 # BATCH_NET=10.16.0.0/16 # BATCH_TASK_CIDR: 2^(BATCH_TASK_CIDR)-2 is the number of ip addresses for a task, default is 4 # BATCH_TASK_CIDR=4 # BATCH_MAX_THREAD_WORKER: the maximun number of threads of the rpc server on # the batch job worker. default:5 # BATCH_MAX_THREAD_WORKER=5 # BATCH_GPU_BILLING: beans cost per hour by different GPUs # The GPU's name can be found by 'nvidia-smi -L' and all spaces need be replaced by '-' # default: 100 # BATCH_GPU_BILLING=default:100,GeForce-GTX-1080-Ti:100,GeForce-GTX-2080-Ti:150,Tesla-V100-PCIE-16GB:200 ================================================ FILE: conf/lxc-script/lxc-ifdown ================================================ #!/bin/sh # $1 : name of container ( name in lxc-start with -n) # $2 : net # $3 : network flags, up or down # $4 : network type, for example, veth # $5 : value of lxc.network.veth.pair . $LXC_ROOTFS_PATH/../env.conf ovs-vsctl --if-exists del-port $Bridge $5 cnt=$(ovs-vsctl list-ports ${Bridge} | wc -l) if [ "$cnt" = "1" ]; then greport=$(ovs-vsctl list-ports ${Bridge} | grep "gre" | wc -l) if [ "$greport" = "1" ]; then ovs-vsctl del-br $Bridge fi fi ================================================ FILE: conf/lxc-script/lxc-ifup ================================================ #!/bin/sh # $1 : name of container ( name in lxc-start with -n) # $2 : net # $3 : network flags, up or down # $4 : network type, for example, veth # $5 : value of lxc.network.veth.pair . $LXC_ROOTFS_PATH/../env.conf ovs-vsctl --may-exist add-br $Bridge ovs-vsctl --may-exist add-port $Bridge $5 ================================================ FILE: conf/lxc-script/lxc-mount ================================================ #!/bin/sh # $1 Container name. # $2 Section (always 'lxc'). # $3 The hook type (i.e. 'clone' or 'pre-mount'). 
#cd $LXC_ROOTFS_PATH/root ; rm -rf nfs && ln -s ../nfs nfs ================================================ FILE: conf/lxc-script/lxc-prestart ================================================ #!/bin/sh # $1 Container id # $2 Container name. # $3 Section (always 'lxc'). # $4 The hook type (i.e. 'clone' or 'pre-mount'). # following environment variables are set by lxc : # $LXC_NAME: is the container's name. # $LXC_ROOTFS_MOUNT: the path to the mounted root filesystem. # $LXC_CONFIG_FILE: the path to the container configuration file. # $LXC_SRC_NAME: in the case of the clone hook, this is the original container's name. # $LXC_ROOTFS_PATH: this is the lxc.rootfs entry for the container. # Note this is likely not where the mounted rootfs is to be found, use LXC_ROOTFS_MOUNT for that. . $LXC_ROOTFS_PATH/../env.conf echo $HNAME > $LXC_ROOTFS_PATH/etc/hostname ================================================ FILE: conf/nginx_docklet.conf ================================================ server { listen %NGINX_PORT; #ssl on; #ssl_certificate /etc/nginx/ssl/server.crt; #ssl_certificate_key /etc/nginx/ssl/server.key; #ssl_protocols TLSv1.2 TLSv1.3; #ssl_prefer_server_ciphers on; #ssl_ciphers TLS13-AES-128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; server_name nginx_docklet.conf; charset UTF-8; add_header X-Frame-Options SAMEORIGIN; merge_slashes off; rewrite (.*)//+(.*) $1/$2 permanent; index index.html index.htm; client_max_body_size 20m; if ($request_method ~* OPTIONS){ return 403; } location ~ ^/NginxStatus/ { stub_status on; access_log off; } location ~ ^/(\d+\.\d+\.\d+\.\d+)/ { proxy_pass http://$1:%PROXY_PORT; proxy_set_header Host $host; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; } location / { client_max_body_size 20m; client_body_buffer_size 256k; proxy_connect_timeout 300; proxy_send_timeout 300; proxy_read_timeout 300; proxy_buffer_size 256k; proxy_buffers 4 
256k; proxy_busy_buffers_size 256k; proxy_temp_file_write_size 256k; proxy_next_upstream error timeout invalid_header http_500 http_503 http_404; proxy_max_temp_file_size 128m; proxy_ignore_client_abort on; proxy_pass http://%MASTER_IP:%WEB_PORT; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; } } ================================================ FILE: doc/devdoc/coding.md ================================================ # NOTE ## here is some thinking and notes in coding * path : scripts' path should be known by scripts to call/import other script -- use environment variables * FS_PREFIX : docklet filesystem path to put data * overlay : " modprobe overlay " to add overlay module * after reboot : * bridges lost -- it's ok, recreate it * loop device lost -- losetup /dev/loop0 BLOCK_FILE again, and lvm will get group and volume back automatically * lvm can do snapshot, image management can use lvm's snapshot -- No! lvm snapshot will use the capacity of LVM group. * cgroup memory control maybe not work. need run command below: echo 'GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"' >> /etc/default/grub && update-grub && reboot * debian don't support cpu.cfs_quota_us option in cgroup. it needs to recompile linux kernel with CONFIG_CFS_BANDWIDTH option * ip can add bridge/link/GRE, maybe we should test whether ip can replace of ovs-vsctl and brctl. ( see "man ip-link" ) * lxc.mount.entry : * do not use relevant path. use absolute path, like : lxc.mount.entry = /root/from-dir /root/rootfs/to-dir none bind 0 0 # lxc.rootfs = /root/rootfs if use relevant paht, container path will be mounted on /usr/lib/x86_64..../ , a not existed path * path of host and container should both exist. if not exist in container, it will be mounted on /usr/lib/x86_64.... 
* if path in container not exists, you can use option : create=dir/file, like : lxc.mount.entry = /root/from-dir /root/rootfs/to-dir none bind,create=dir 0 0 # lxc.rootfs = /root/rootfs * lxc.mount.entry : bind and rbind ( see "man mount" ) * bind means mount a part of filesystem on somewhere else of this filesystem * but bind only attachs a single filesystem. That means the submount of source directory of mount may disappear in target directory. * if you want to make submount work, use rbind option. rbind will make entire file hierarchy including submounts mounted on another place. * NOW, we use bind in container.sh. maybe it need rbind if FS_PREFIX/global/users/$USERNAME/nfs is under glusterfs mountpoint * rpc server maybe not security. anyone can call rpc method if he knows ip address. * maybe we can use "transport" option of xmlrpc.client.ServerProxy(uri, transport="http://user:pass@host:port/path") and SimpleXMLRPCRequestHandler of xmlrpc.server.SimpleXMLRPCServer(addr, requestHandler=..) to parse the rpc request and authenticate the request xmlrpc.client.ServerProxy can also support https request, it is also a security method * If we use rpc with authentication, maybe we can use http server and http request to replace rpc * frontend and backend arch: +-----------------+ Web -- Flask --HttpRest Core | +-----------------+ Now, HttpRest and Core work as backend Web and Flask work as frontend all modules are in backend Flask just dispatch urls and render web pages (Maybe Flask can be merged in Core and works as http server) (Then Flask needs to render pages, parse urls, response requests, ...) (It maybe not fine) * httprest.py : httphandler needs to call vclustermgr/nodemgr/... 
to handler request we need to call these classes in httphandler Way-1: init/new these classes in httphandler init function (httphandler need to init parent class) -- wrong : httpserver will create a new httphandler instance for every http request ( see /usr/lib/python3.4/socketserver.py ) Way-2: use global varibles -- Now this way * in shell, run python script or other not built-in command, the command will run in new process and new process group ( see csapp shell lab ) so, the environment variables set in shell can not be see in python/... but command like below can work : A=ab B=ba ./python.py * maybe we need to parse argvs in python some module to parse argvs : sys.argv, optparse, getopt, argparse * in shell, { command; } means run command in current shell, ";" is necessary ( command; ) means run command in sub shell * function in registered in rpc server must have return. without return, the rpc client will raise an exception * ** NEED TO BE FIX ** we add a prefix in etcdlib so when we getkey, key may be a absolute path from base url when we setkey use the key we get, etcdlib will append the absolute path to prefix, it will wrong * overlay : upperdir and workdir must in the same mount filesystem. that means we should mount LV first and then mkdir upperdir and workdir in the LV mountpoint * when use 'worker.py > log' to redirect output of python script, it will empty output of log. because python interpreter will use buffer to collect output. we can use ways below to fix this problem: stdbuf -o 0 worker.py > log # but it fail in my try. don't know why python3 -u worker.py > log # recommended, -u option of python3 print('output', flush=True) # flush option of print sys.stdout.flush() # flush by hand * CPU QUOTA should not be too small. 
too small it will work so slowly ================================================ FILE: doc/devdoc/config_info.md ================================================ # Info of docklet ## container info container name : username-clusterid-nodeid hostname : host-nodeid lxc config : /var/lib/lxc/username-clusterid-nodeid/config lxc rootfs : /var/lib/lxc/username-clusterid-nodeid/rootfs lxc rootfs |__ / : aufs : basefs + volume/username-clusterid-nodeid |__ /nfs : global/users/username/data |__ /etc/hosts : global/users/username/clusters/clusterid/hosts |__ /root/.ssh : global/users/username/ssh ## ETCD Table we use etcd for some configuration information of our clusters, here is some details. every cluster has a CLUSTER_NAME and all data of this cluster is put in a directory called CLUSTER_NAME in etcd just like a table. so, different cluster should has different CLUSTER_NAME. below is content of cluster info in CLUSTER_NAME 'table' in etcd: key token random code token for checking whether master and workers has the same global filesystem dir machines ... info of physical clusters dir machines/allnodes ip:ok record all nodes, for recovery and checks dir machines/runnodes ip: ? record running node for this start up. when startup: ETCD | IP:waiting | 1. worker write worker-ip:waiting 2. master update IP:init-mode | IP:init-mode | 3. worker init itself by init-mode | IP:work | 4. worker finish init and update IP:work 5. master add workerip and update IP:ok | IP:ok | key service/master master-ip key service/mode new,recovery start mode of cluster key vcluster/nextid ID next available ID ## filesystem here is the path and content description of docklet filesystem FS_PREFIX |__ global/users/{username} | |__ clusters/clustername : clusterid, cluster size, status, containers, ... 
in json format | |__ hosts/id.hosts : ip host-nodeid host-nodeid.clustername | |__ data : direcroty in distributed filesystem for user to put his data | |__ ssh : ssh keys | |__ local |__ docklet-storage : loop file for lvm |__ basefs : base image |__ volume / { username-clusterid-nodeid } : upper layer of container ## vcluster files ### hosts file:(raw) IP-0 host-0 host-0.clustername IP-1 host-1 host-1.clustername ... ### info file:(json) { clusterid: ID , status: stopped/running , size: size , containers: [ { containername: lxc_name, hostname: hostname, ip: lxc_ip, host: host_ip }, { containername: lxc_name, hostname: hostname, ip: lxc_ip, host: host_ip }, ... ] } ================================================ FILE: doc/devdoc/network-arch.md ================================================ # Architecture of Network ## Architecture of containers networks In current version, to avoid VLAN ID using up, docklet employs a new architecture of containers networks. According to the new architecture, users' networks are exclusive, while the network were shared by all users before. And the new architecture gets rid of VLAN, so it solves the problem of VLAN ID using up. The architecture is shown as follows: ![](./ovs_arch.png) There are some points to describe the architecture: 1.Each user has an unique and exclusive virtual network. The container inside the network communicates with outside via gateway. 2.If there is a container in the host, then there will be a user's OVS bridge. Each user's container will connect to user's OVS bridge by Veth Pair. A user's OVS bridge will be named after "docklet-br-". 3.Each user's network is star topology, each host on which there is no gateway will connect to the host on which the user's gateway is by GRE tunnel. Thus, there may be many GRE tunnels between two hosts(Each GRE tunnels belongs to different user.), Docklet takes user's id as keys to distinguish from each other. 
4.OVS bridge and GRE tunnels are created and destroyed dynamically, which means that network including bridge and GRE tunnels is created only when user starts the container and is destroyed by calling '/conf/lxc-script/lxc-ifdown' script only when user stops the container. 5.There are two modes to set up gateways: distributed or centralized. Centralized gateways is the default mode and it will set up the gateways only on Master host, while distributed gateways mode will set up gateways on different workers, just like the picture shown above. NAT/iptables in Linux Kernel is needed when container communicate with outside network via gateway. ## Processing users' requests (Workspace requests) The picture of processing user's requests will show the whole architecture of Docklet. The process is shown as follows, firstly, these are the requests to Workspace: ![](./workspace_requests.png) ## Processing users' requests (Other requests) Other requests. ![](./other_requests.png) ================================================ FILE: doc/devdoc/networkmgr.md ================================================ # Network Manager ## About 网络管理是为docklet提供网络管理的模块。 关于需求,主要有两点: * 一个中心管理池,按 网络段(IP/CIDR) 给用户分配网络池 * 很多用户网络池,按 一个或者几个网络地址 给用户的cluster分配网络地址 ## Data Structure 面对这两种需求,设计了两种数据结构来管理网络地址。 * 区间池 / interval pool : 分配、回收 网络段 interval pool 中的元素为区间,其由很多个区间组成。 一个朴素的 区间池 是这样的 : interval pool : [A1,A2],[B1,B2],[C1,C2],...[X1,X2] 每次申请一段地址的时候,从上述区间中选择一个区间分配,并将该区间中剩余部分放回区间池 而考虑到 网络段(IP/CIDR) 是 2 的幂的结构,所以可以将区间池进一步设计成如下结构: interval pool: ... ... cidr=16 : [A1,A2], [A3,A4], ... cidr=17 : [B1,B2], [B3,B4], ... cidr=18 : [C1,C2], [C3,C4], ... ... ... 上述结构还可以进一步优化,因为 每一个区间的结尾地址可以通过开始地址和CIDR算出来,所以每个区间只需要写一个起始地址就可以了 所以: interval pool: ... ... cidr=16 : A1, A3, ... cidr=17 : B1, B3, ... cidr=18 : C1, C3, ... ... ... 而其中,每一个元素,比如 A1,其实代表的是一个区间 [A1, A1+2^16-1] 这种基于2的幂的区间设计的好处是可以方便的进行 分配 和 合并 区间,操作起来更加高效。 * 枚举池 / enumeration pool : 分配、回收一个、多个网络地址 enum pool 中的元素为单个网络地址,比如: enum pool : A, B, C, D, ... 
X ## API 操作上述两种数据结构的API,这里省略 ## Network Manager Storage Design * center : 中心池,提供 用户网络段 的分配、回收 info : IP/CIDR intervalpool : cidr16 : ... cidr17 : ... ... ... * system : 系统保留地址,为系统内部的 网络地址 提供 分配回收 info : IP/CIDR enumpool : ... * vlan/ tag= ovs-vsctl clear port tag patch 是用来连接两个网桥的,操作如下: ovs-vsctl add-br br0 ovs-vsctl add-br br1 ovs-vsctl add-port br0 patch0 -- set interface patch0 type=patch options:peer=patch1 ovs-vsctl add-port br1 patch1 -- set interface patch1 type=patch options:peer=patch0 # NOW : two bridges are connected by patch ## Note 4 一台机器上一个域的网桥只有一个,比如在 host-0 上,建两个网桥: ovs-vsctl add-br br0 ip address add 172.0.0.1/8 dev br0 ip link set br0 up ovs-vsctl add-br br1 ip address add 172.0.0.2/8 dev br1 ip link set br1 up 则,后配置的那个网桥会失效 因为系统认为,172.0.0.1/8 内的机器都应该在 br0 中 而以下配置是正确的: ovs-vsctl add-br br0 ip address add 172.0.0.1/24 dev br0 ip link set br0 up ovs-vsctl add-br br1 ip address add 172.0.1.1/24 dev br1 ip link set br1 up ## Note 5 关于网关,网桥/交换机是二层设备,网关是三层组件,我们可以将网桥连接起来,多个网桥共用一个网关 ovs-vsctl add-br br0 ip link set br0 up ovs-vsctl add-br br1 ip address add 172.0.0.1/24 dev br1 ip link set br1 up ovs-vsctl add-port br0 patch0 -- set interface patch0 type=patch options:peer=patch1 ovs-vsctl add-port br1 patch1 -- set interface patch1 type=patch options:peer=patch0 # lxc config : # ip -- 172.0.0.11/24 # gateway -- 172.0.0.1 # lxc.network.veth.pair -- base , base is connected on br0 lxc-start -f container.conf -n base -F -- /bin/bash # NOW : lxc network is running ok ## Note 6 基于多个网桥实现VLAN ### 方案一 ovs-vsctl add-br br0 ip link set br0 up ovs-vsctl add-br br1 ip address add 172.0.0.1/24 dev br1 ip link set br1 up ovs-vsctl add-port br0 patch0 -- set interface patch0 type=patch options:peer=patch1 ovs-vsctl add-port br1 patch1 -- set interface patch1 type=patch options:peer=patch0 # lxc config : # ip -- 172.0.0.11/24 # gateway -- 172.0.0.1 # lxc.network.veth.pair -- base , base is connected on br0 lxc-start -f container.conf -n base -F -- /bin/bash # NOW : lxc 
network is running ok ## above is the same as before ovs-vsctl set port base tag=5 ovs-vsctl set port patch0 tag=5 # NOW : lxc network is running ok # ARCH +-----------------------+ +----------------------+ | br0 | | br1 : 172.0.0.1/24 | +--+-----tag=5---tag=5--+ +---+-------+----------+ | | | patch | | | | +-------------------+ | | | | internal base:172.0.0.11/24 internal (gateway:172.0.0.1) # flow : base --> patch --> br1/internal * 方案可行 * 但是,每个 VLAN 需要一个网关 ### 方案二 (不可行) # ARCH +-------------------------------------------------------------+ | br0 | +--+-----tag=5---tag=5---------+-----tag=6---tag=6---------+--+ | | | +-----+ | | | +-----+ | | | +--| br1 |--+ | +--| br2 |--+ | | +-----+ | +-----+ internal base1:172.0.0.11/24 base2:172.0.0.12/24 # flow 1 : base1 --> br1 --> internal # flow 2 : base1 --> br1 --> br2 --> base2 * 方案不可行,因为上面的 flow 可以使得 base1、base2 在二层通信,无法隔离 ## Note 7 上述可行方案的简化版 ### 简化版一 ovs-vsctl add-br br0 ip link set br0 up # add a fake bridge connected to br0 with vlan tag=5 ovs-vsctl add-br fakebr br0 5 ip address add 172.0.0.1/24 dev fakebr ip link set fakebr up # lxc config: # ip : 172.0.0.11/24 # gateway : 172.0.0.1/24 # lxc.network.veth.pair -- base , base is connected on br0 lxc-start -f container.conf -n base -F -- /bin/bash ovs-vsctl set port base tag=5 # ARCH +-----------------------+ | br0 | +--+-----tag=5---tag=5--+ | | | | | fakebr:172.0.0.1/24 | | internal base:172.0.0.11/24 (gateway:172.0.0.1) # flow : base --> fakebr ### 简化版二 ovs-vsctl add-br br0 ip link set br0 up # add an internal interface for vlan ovs-vsctl add-port br0 vlanif tag=5 -- set interface vlanif type=internal ip address add 172.0.0.1/24 dev vlanif ip link set vlanif up # lxc config: # ip : 172.0.0.11/24 # gateway : 172.0.0.1/24 # lxc.network.veth.pair -- base , base is connected on br0 lxc-start -f container.conf -n base -F -- /bin/bash ovs-vsctl set port base tag=5 # ARCH +-----------------------+ | br0 | +--+-----tag=5---tag=5--+ | | | | | vlanif:172.0.0.1/24 | | 
internal base:172.0.0.11/24 (gateway:172.0.0.1) # flow : base --> vlanif ### 简化版一 & 简化版二 使用 ovs-vsctl show 查看的时候,上述两个版本显示的信息是一样的,说明 fakebr 其实本质上可能就是一个 internal interface 其实,方案一中,对 br1 的 IP(172.0.0.1/24)的配置,其实就是对 br1 的 internal 的 interface 的配置,所以其实多余的网桥不是必须的,而 interface 才是真正需要的。 而,internal interface 相当于是连接着本地Linux的虚拟网卡,这块网卡的另一端连着OVS的虚拟网桥。 而,Linux 的网络栈又管理着物理网卡、虚拟网卡,以及对这些网卡的包进行转发、路由等处理。 似乎,Linux 的网络栈又成了一个大的交换机/网桥,上面连接着 internal interface 和 物理网卡。 ## Note 8 基于上述的实践和探索,其实 **我们需要给一个VLAN配置一个可以出去的网关、网卡。** 那么,我们一个简单可行的方案可以这样: +------------------------------------------------------------------------------+ | bridge | | <------- VLAN ID=5 ---------> <---- VLAN ID=6 ------> | +--+-----tag=5---tag=5------------tag=5-------------tag=6-------------tag=6----+ | | | | | | | | lxc-2:172.0.0.12/24 | | | internal | (gateway:172.0.0.1) | | | | | | | lxc-1:172.0.0.11/24 gw5:172.0.0.1/24 lxc-3:172.0.1.11/24 gw6:172.0.1.1/24 (gateway:172.0.0.1) internal (gateway:172.0.1.1) internal | | | | +----------- NAT / iptables --------+ |||| |||| \\\/// \\// \/ # end ================================================ FILE: doc/devdoc/proxy-control.md ================================================ # Some Note for configurable-http-proxy usage ## intsall sudo apt-get install nodejs nodejs-legacy npm sudo npm install -g configurable-http-proxy ## start configurable-http-proxy -h : for help configurable-http-proxy --ip IP \ --port PORT \ --api-ip IP \ --api-port PORT \ --default-target http://IP:PORT \ --log-level debug/info/warn/error default ip:port is 0.0.0.0:8000, default api-ip:api-port is localhost:8001 ## control route table ### get route table * without token: curl http://localhost:8001/api/routes * with token: curl -H "Authorization: token TOKEN" http://localhost:8001/api/routes ### add/set route table * without token: curl -XPOST --data '{"target":"http://TARGET-IP:TARGET-PORT"}' http://localhost:8001/api/routes/PROXY-URL * with token: curl -H "Authorization: token TOKEN" -XPOST --data 
'{"target":"http://TARGET-IP:TARGET-PORT"}' http://localhost:8001/api/routes/PROXY-URL ### delete route table line * without token: curl -XDELETE http://localhost:8001/api/routes/PROXY-URL * with token: curl -H "Authorization: token TOKEN" -XDELETE http://localhost:8001/api/routes/PROXY-URL ================================================ FILE: doc/devdoc/startup.md ================================================ # startup mode ## new mode #### step 1 : data clean etcd table write token init etcd table clean global directory of user clusters #### step 2 : nodemgr init network wait for all nodes starts |_____ listen node joins IP:waiting <--- worker starts update etcd ----> IP:init-mode ---> worker init |____ stop all containers |____ umount mountpoint, delete lxc files, delete LV |____ delete VG, umount loop dev, delete loop file |____ init loop file, loop dev, create VG add node to list <--- IP:work <---- init done, begin work check all nodes begin work #### step 3 : vclustermgr Nothing to do ## recovery mode #### step 1 : data write token init some of etcd table #### step 2 : nodemgr init network wait for all nodes starts |_____ listen node joins IP:waiting <--- worker starts update etcd ----> IP:init-mode ---> worker init |____ check loop file, loop dev, VG |____ check all containers and mountpoint add node to list <--- IP:work <---- init done, begin work check all nodes begin work #### step 3 : vclustermgr recover vclusters:some need start ---------------> recover containers: some need start ================================================ FILE: doc/devguide/devguide.md ================================================ # Docklet Development Guide on GitHub This document is intended for GitHubers to contribute for Docklet System. ## Introduction of Docklet Development Workflow We use fork and pull request workflow to push forward Docklet Project. 
![Docklet Workflow](images/workflow.png) ## Step by Step ### Prepare Before work, we need to prepare our working repository. These actions should be executed just once. ##### Step 1 : fork Open https://github.com/unias/docklet in your browser and click **Fork** button on the top-right corner. ##### Step 2 : clone & config * clone docklet from your github repository ``` git clone https://github.com/YourName/docklet.git ``` * config your local repository ``` # add unias/docklet as your upstream git remote add upstream https://github.com/unias/docklet.git # set push to upstream not work git remote set-url --push upstream no_push ``` ### Work This part is about the steps of making contributions to Docklet by pull request. #### Work : Begin ##### Step 3 : fetch Fetch the latest code from **upstream(unias/docklet)** ``` git fetch upstream ``` ##### Step 4 : branch Create new branch for your work ``` git checkout -b BranchName upstream/master ``` This is not the step you must do and you can work on local master branch. But we recommend you follow these steps. Using branch to develop new features fits git. #### Work : Work Now you can focus on your work by **commit** and **push**. ##### Step 5 : commit & commit Commit is commit. Nothing to say. ##### Step 6 : push & push Push your work to **your own Github repository** by **BranchName** ``` git push origin BranchName ``` #### Work : End After you complete work of this feature, you maybe want to create a pull request to unias/docklet. Please follow steps below. ##### Step 7 : fetch Fetch the latest code from **unias/docklet** ``` git fetch upstream ``` ##### Step 8 : merge Merge upstream's latest code to your working branch ``` git merge upstream/master ``` Please ensure that you are on your working branch. If conflict happens, resolve it and commit. ##### Step 9 : push Push to your github repository by BranchName. 
``` git push origin BranchName ``` ##### Step 10 : pull request Open https://github.com/YourName/docklet, click **New pull request** and select your working **BranchName** to create the pull request. ## Tips ##### local master After you fetch upstream code, you can move forward your local master branch to upstream/master. And push your github repository master branch to update. ``` git fetch upstream git checkout master git merge upstream/master git push origin master ``` ##### pretty git log or git log with GUI You can config your git log command with pretty format. ``` git config --global alias.lg "log --graph --color --pretty=format:' %Cred%h %Creset/ %<(10,trunc)%Cblue%an%Creset | %<(60,trunc)%s | %cr %Cred%d' --remotes --branches" ``` Now, type **git lg** to see what happens. Of course, you can use GUI with git. **gitg** is a good choice. It shows log of git very friendly. ##### understand git log git log has much information. You should understand the log info of git. This can help you know how to move forward your work. Especially the reference of branches : upstream/master, HEAD, master, origin/master, other branches. ##### graphs/network of github The Graphs/Network of Github is very useful. With this, you can know whether you can create a pull request without conflict. Open https://github.com/unias/docklet/network in your browser and see the network graph of docklet. 
================================================ FILE: doc/example/example-LogisticRegression.py ================================================ # import package import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model, datasets %matplotlib inline # load data : we only use target==0 and target==1 (2 types classify) and feature 0 and feature 2 () iris = datasets.load_iris() X = iris.data[iris.target!=2][:, [0,2]] Y = iris.target[iris.target!=2] h = .02 # step size in the mesh logreg = linear_model.LogisticRegression(C=1e5) logreg.fit(X, Y) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) #plt.figure(1, figsize=(4, 3)) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) ================================================ FILE: meter/connector/master.py ================================================ #!/usr/bin/python3 import socket, select, errno, threading, os class master_connector: tcp_port = 1727 max_minions = 256 conn = {} epoll_fd = select.epoll() def establish_vswitch(ovsname): os.system('ovs-vsctl del-br ovs-%s >/dev/null 2>&1' % ovsname) os.system('ovs-vsctl add-br ovs-%s' % ovsname) os.system('brctl addif ovs-bridge ovs-%s >/dev/null 2>&1' % ovsname) os.system('ip link set ovs-system up') os.system('ip link set ovs-%s up' % ovsname) def build_gre_conn(ovsname, ipaddr): name = 
ipaddr.replace('.','_') os.system('ovs-vsctl add-port ovs-%s gre-%s -- set interface gre-%s type=gre options:remote_ip=%s 2>/dev/null' % (ovsname, name, name, ipaddr)) def break_gre_conn(ovsname, ipaddr): name = ipaddr.replace('.','_') os.system('ovs-vsctl del-port ovs-%s gre-%s 2>/dev/null' % (ovsname, name)) def close_connection(fd): master_connector.epoll_fd.unregister(fd) master_connector.conn[fd][0].close() addr = master_connector.conn[fd][1] master_connector.conn.pop(fd) master_connector.break_gre_conn('master', addr) def do_message_response(input_buffer): assert(input_buffer == b'ack') return b'ack' def start(): thread = threading.Thread(target = master_connector.run_forever, args = []) thread.setDaemon(True) thread.start() return thread def run_forever(): listen_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) listen_fd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listen_fd.bind(('', master_connector.tcp_port)) listen_fd.listen(master_connector.max_minions) master_connector.epoll_fd.register(listen_fd.fileno(), select.EPOLLIN) datalist = {} master_connector.establish_vswitch('master') try: while True: epoll_list = master_connector.epoll_fd.poll() for fd, events in epoll_list: if fd == listen_fd.fileno(): fileno, addr = listen_fd.accept() fileno.setblocking(0) master_connector.epoll_fd.register(fileno.fileno(), select.EPOLLIN | select.EPOLLET) master_connector.conn[fileno.fileno()] = (fileno, addr[0]) master_connector.build_gre_conn('master', addr[0]) elif select.EPOLLIN & events: datas = b'' while True: try: data = master_connector.conn[fd][0].recv(10) if not data and not datas: master_connector.close_connection(fd) break else: datas += data except socket.error as msg: if msg.errno == errno.EAGAIN: try: datalist[fd] = master_connector.do_message_response(datas) master_connector.epoll_fd.modify(fd, select.EPOLLET | select.EPOLLOUT) except: master_connector.close_connection(fd) else: master_connector.close_connection(fd) break elif 
select.EPOLLOUT & events: sendLen = 0 while True: sendLen += master_connector.conn[fd][0].send(datalist[fd][sendLen:]) if sendLen == len(datalist[fd]): break master_connector.epoll_fd.modify(fd, select.EPOLLIN | select.EPOLLET) elif select.EPOLLHUP & events: master_connector.close_connection(fd) else: continue finally: os.system('ovs-vsctl del-br ovs-master >/dev/null 2>&1') ================================================ FILE: meter/connector/minion.py ================================================ #!/usr/bin/python3 import socket, time, threading, os class minion_connector: def connect(server_ip): from connector.master import master_connector connected = True while True: try: fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) fd.connect((server_ip, master_connector.tcp_port)) connected = True print("[info]", "connected to master.") master_connector.establish_vswitch('minion') master_connector.build_gre_conn('minion', server_ip) while True: data = b'ack' if fd.send(data) != len(data): break readData = fd.recv(1024) time.sleep(0.5) fd.close() except socket.error as e: master_connector.break_gre_conn('minion', server_ip) if connected: print("[info]", "non-connected with master.") except Exception as e: pass finally: if connected: os.system('ovs-vsctl del-br ovs-minion >/dev/null 2>&1') connected = False time.sleep(1) def start(server_ip): thread = threading.Thread(target = minion_connector.connect, args = [server_ip]) thread.setDaemon(True) thread.start() return thread ================================================ FILE: meter/daemon/http.py ================================================ import json, cgi, threading from http.server import BaseHTTPRequestHandler, HTTPServer class base_http_handler(BaseHTTPRequestHandler): def load_module(self): return None def do_POST(self): try: default_exception = 'unsupported request.' 
success = True data = None length = self.headers['content-length'] if length == None: length = self.headers['content-length'] = 0 if int(length) > (1<<12): raise Exception("data too large") http_form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,environ={'REQUEST_METHOD':'POST','CONTENT_TYPE': "text/html"}) form = {} for item in http_form: try: value = http_form[item].file.read().strip() except: value = http_form[item].value try: value = value.decode() except: pass form[item] = value parts = self.path.split('/', 2) if len(parts) != 3: raise Exception(default_exception) [null, version, path] = parts pymodule = self.load_module() + '_' + version module = __import__('daemon.' + pymodule) handler = module.__dict__[pymodule].__dict__['case_handler'] method = path.replace('/', '_') if not hasattr(handler, method): raise Exception(default_exception) data = handler.__dict__[method](form, self.handler_class.args) except Exception as e: success = False data = {"reason": str(e)} finally: self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(json.dumps({"success": success, "data": data}).encode()) self.wfile.write("\n".encode()) return class master_http_handler(base_http_handler): http_port = 1728 def load_module(self): self.handler_class = master_http_handler return 'master' class minion_http_handler(base_http_handler): http_port = 1729 def load_module(self): self.handler_class = minion_http_handler return 'minion' class http_daemon_listener: def __init__(self, handler_class, args = None): handler_class.args = args self.handler_class = handler_class def listen(self): server = HTTPServer(('', self.handler_class.http_port), self.handler_class) server.serve_forever() ================================================ FILE: meter/daemon/master_v1.py ================================================ import subprocess, os def http_client_post(ip, port, url, entries = {}): import urllib.request, urllib.parse, json url 
class case_handler:
    # [Order-by] lexicographic order

    # curl -L -X POST http://0.0.0.0:1728/v1/minions/list
    def minions_list(form, args):
        """Report the address of every connected minion."""
        return {"minions": [args.conn[key][1] for key in args.conn]}

    # curl -L -X POST -F mem=4096 -F cpu=2 http://0.0.0.0:1728/v1/resource/allocation
    def resource_allocation(form, args):
        """Recommend one minion that has at least form['mem'] MB of memsw free."""
        mem = int(form['mem'])
        cpu = int(form['cpu'])
        from daemon.http import minion_http_handler
        candidates = {}
        for key in args.conn:
            addr = args.conn[key][1]
            reply = http_client_post(addr, minion_http_handler.http_port,
                                     '/v1/system/memsw/available')
            if reply['success'] and reply['data']['Mbytes'] >= mem:
                candidates[addr] = reply['data']
        if len(candidates) <= 0:
            raise Exception("no minions")
        from policy.allocate import candidates_selector
        chosen = candidates_selector.select(candidates)
        return {"recommend": chosen}

    # curl -L -X POST -F user=docklet http://0.0.0.0:1728/v1/user/live/add
    def user_live_add(form, args):
        """Mark a user as live; False when the user directory is missing."""
        if not os.path.exists('/var/lib/docklet/global/users/%s' % form['user']):
            return False
        subprocess.getoutput('echo live > /var/lib/docklet/global/users/%s/status' % form['user'])
        return True

    # curl -L -X POST -F user=docklet http://0.0.0.0:1728/v1/user/live/remove
    def user_live_remove(form, args):
        """Drop a user's live marker (best effort)."""
        subprocess.getoutput('rm -f /var/lib/docklet/global/users/%s/status' % form['user'])
        return True

    # curl -L -X POST http://0.0.0.0:1728/v1/user/live/list
    def user_live_list(form, args):
        """List the names of all users currently marked live."""
        return subprocess.getoutput('ls -1 /var/lib/docklet/global/users/*/status 2>/dev/null | awk -F\/ \'{print $(NF-1)\'}').split()
class case_handler:
    """REST endpoints served by the minion daemon (dispatched by daemon.http).

    Each handler receives the parsed POST form and the listener args and
    returns a JSON-serializable object.
    """
    # [Order-by] lexicographic order

    # curl -L -X POST -F uuid=docklet-1-0 http://0.0.0.0:1729/v1/billing/increment
    def billing_increment(form, args):
        return billing_manager.fetch_increment_and_clean(form['uuid'])

    # curl -L -X POST http://0.0.0.0:1729/v1/cgroup/container/list
    def cgroup_container_list(form, args):
        return cgroup_manager.get_cgroup_containers()

    # curl -L -X POST -F policy=etime_rev_policy http://0.0.0.0:1729/v1/smart/quota/policy
    def smart_quota_policy(form, args):
        """Switch the smart controller to a named policy class.

        SECURITY: the policy name arrives from an untrusted HTTP client.
        The original code ran eval() on it, which allows arbitrary code
        execution; instead, look the name up in this module's namespace
        (the policy classes are pulled in by ``from policy.quota import *``).
        """
        msg = 'success'
        try:
            policy = globals()[form['policy']]
            smart_controller.set_policy(policy)
        except Exception as e:
            # str(e), not e: the reply dict is JSON-serialized by the
            # HTTP layer and a raw Exception object is not serializable.
            msg = str(e)
        return {'message': msg}

    # curl -L -X POST -F uuid=n1 http://0.0.0.0:1729/v1/cgroup/container/limit
    def cgroup_container_limit(form, args):
        return cgroup_manager.get_container_limit(form['uuid'])

    # curl -L -X POST -F uuid=n1 http://0.0.0.0:1729/v1/cgroup/container/sample
    def cgroup_container_sample(form, args):
        return cgroup_manager.get_container_sample(form['uuid'])

    # curl -L -X POST http://0.0.0.0:1729/v1/system/loads
    def system_loads(form, args):
        return system_manager.get_system_loads()

    # curl -L -X POST http://0.0.0.0:1729/v1/system/memsw/available
    def system_memsw_available(form, args):
        return system_manager.get_available_memsw()

    # curl -L -X POST -F size=16 http://0.0.0.0:1729/v1/system/swap/extend
    def system_swap_extend(form, args):
        return system_manager.extend_swap(int(form['size']))

    # curl -L -X POST http://0.0.0.0:1729/v1/system/swap/clear
    def system_swap_clear(form, args):
        return system_manager.clear_all_swaps()

    # curl -L -X POST http://0.0.0.0:1729/v1/system/total/physical/memory
    def system_total_physical_memory(form, args):
        return system_manager.get_total_physical_memory_for_containers()


'''
# curl -X POST -F uuid=n1 http://0.0.0.0:1729/v1/blacklist/add
def blacklist_add(form):
    exists = form['uuid'] in smart_controller.blacklist
    if not exists:
        smart_controller.blacklist.add(form['uuid'])
    return {"changed": not exists}

# curl -X POST -F uuid=n1 http://0.0.0.0:1729/v1/blacklist/remove
def blacklist_remove(form):
    exists = form['uuid'] in smart_controller.blacklist
    if exists:
        smart_controller.blacklist.remove(form['uuid'])
    return {"changed": exists}

# curl -X POST http://0.0.0.0:1729/v1/blacklist/show
def blacklist_show(form):
    blacklist = []
    for item in smart_controller.blacklist:
        blacklist.append(item)
    return blacklist
'''
"cnt_acct": cnt_acct} ================================================ FILE: meter/intra/cgroup.py ================================================ import subprocess, os class cgroup_controller: def read_value(group, uuid, item): path = cgroup_manager.__default_prefix__ % (group, uuid, item) if not os.path.exists(path): raise Exception('read: container "%s" not found!' % uuid) with open(path, 'r') as file: value = file.read() return value.strip() def write_value(group, uuid, item, value): path = cgroup_manager.__default_prefix__ % (group, uuid, item) if not os.path.exists(path): raise Exception('write: container "%s" not found!' % uuid) try: with open(path, 'w') as file: file.write(str(value)) except: pass class cgroup_manager: __prefix_docker__ = '/sys/fs/cgroup/%s/system.slice/docker-%s.scope/%s' __prefix_lxc__ = '/sys/fs/cgroup/%s/lxc/%s/%s' __prefix_lxcinit__ = '/sys/fs/cgroup/%s/init.scope/lxc/%s/%s' def set_default_memory_limit(limit): cgroup_manager.__default_memory_limit__ = limit def set_cgroup_prefix(prefix = __prefix_lxc__): cgroup_manager.__default_prefix__ = prefix def auto_detect_prefix(): cgroup_manager.__default_prefix__ = cgroup_manager.__prefix_docker__ if len(cgroup_manager.get_cgroup_containers()) > 0: return cgroup_manager.__default_prefix__ = cgroup_manager.__prefix_lxcinit__ if len(cgroup_manager.get_cgroup_containers()) > 0: return cgroup_manager.__default_prefix__ = cgroup_manager.__prefix_lxc__ if len(cgroup_manager.get_cgroup_containers()) > 0: return # print("[info]", "set cgroup prefix to %s" % cgroup_manager.__default_prefix__) def get_cgroup_containers(): containers = subprocess.getoutput("find %s -type d 2>/dev/null | awk -F\/ '{print $(NF-1)}'" % (cgroup_manager.__default_prefix__ % ('cpu', '*', '.'))).split() uuids = [] for item in containers: if item.startswith('docker-') and item.endswith('.scope') and len(item) > 64: uuids.append(item[7:-6]) else: uuids.append(item) return uuids def get_container_pid(uuid): return 
int(cgroup_controller.read_value('cpu', uuid, 'tasks').split()[0]) def get_container_sample(uuid): mem_page_sample = int(cgroup_controller.read_value('memory', uuid, 'memory.memsw.usage_in_bytes')) mem_phys_sample = int(cgroup_controller.read_value('memory', uuid, 'memory.usage_in_bytes')) cpu_sample = int(cgroup_controller.read_value('cpu', uuid, 'cpuacct.usage')) pids_sample = int(cgroup_controller.read_value('pids', uuid, 'pids.current')) container_pid = cgroup_manager.get_container_pid(uuid) from intra.system import system_manager real_time = system_manager.get_proc_etime(container_pid) return {"cpu_sample": cpu_sample, "pids_sample": pids_sample, "mem_page_sample": mem_page_sample, "mem_phys_sample": mem_phys_sample, "pid": container_pid, "real_time": real_time} def get_container_limit(uuid): mem_phys_quota = int(cgroup_controller.read_value('memory', uuid, 'memory.limit_in_bytes')) mem_page_quota = int(cgroup_controller.read_value('memory', uuid, 'memory.memsw.limit_in_bytes')) cpu_shares = int(cgroup_controller.read_value('cpu', uuid, 'cpu.shares')) cpu_quota = int(cgroup_controller.read_value('cpu', uuid, 'cpu.cfs_quota_us')) cpu_quota = cpu_quota if cpu_quota >= 0 else -1 pids_quota = cgroup_controller.read_value('pids', uuid, 'pids.max') pids_quota = int(pids_quota) if pids_quota != 'max' else -1 return {"cpu_quota": cpu_quota, "cpu_shares": cpu_shares, "mem_phy_quota": mem_phys_quota, "mem_page_quota": mem_page_quota, "pids_quota": pids_quota} def get_container_oom_status(uuid): [_x, idle, _y, oom] = cgroup_controller.read_value('memory', uuid, 'memory.oom_control').split() return (idle == '1', oom == '1') def set_container_oom_idle(uuid, idle): cgroup_controller.write_value('memory', uuid, 'memory.oom_control', 1 if idle else 0) def protect_container_oom(uuid): cgroup_controller.write_value('memory', uuid, 'memory.oom_control', 1) data = cgroup_manager.get_container_limit(uuid) if data["mem_page_quota"] >= 9223372036854771712: memory_limit_in_bytes = 
cgroup_manager.__default_memory_limit__ << 30 mem_phy_quota = min(data["mem_phy_quota"], memory_limit_in_bytes) mem_page_quota = memory_limit_in_bytes cgroup_controller.write_value('freezer', uuid, 'freezer.state', 'FROZEN') cgroup_controller.write_value('memory', uuid, 'memory.limit_in_bytes', mem_phy_quota) cgroup_controller.write_value('memory', uuid, 'memory.limit_in_bytes', mem_phy_quota) cgroup_controller.write_value('memory', uuid, 'memory.memsw.limit_in_bytes', mem_page_quota) cgroup_controller.write_value('freezer', uuid, 'freezer.state', 'THAWED') def set_container_physical_memory_limit(uuid, Mbytes, freeze = False): if freeze: cgroup_controller.write_value('freezer', uuid, 'freezer.state', 'FROZEN') memory_limit = int(max(0, Mbytes)) << 20 cgroup_controller.write_value('memory', uuid, 'memory.limit_in_bytes', memory_limit) if freeze: cgroup_controller.write_value('freezer', uuid, 'freezer.state', 'THAWED') def set_container_cpu_priority_limit(uuid, ceof): cpu_scaling = min(1024, 10 + int(1024 * ceof)) cgroup_controller.write_value('cpu', uuid, 'cpu.shares', cpu_scaling) ================================================ FILE: meter/intra/smart.py ================================================ import subprocess, time, os, threading, math from intra.system import system_manager from intra.cgroup import cgroup_manager from intra.billing import billing_manager class smart_controller: def set_policy(policy): smart_controller.policy = policy def start(interval = 4): thread = threading.Thread(target = smart_controller.smart_control_forever, args = [interval]) thread.setDaemon(True) thread.start() return thread def smart_control_forever(interval): last_live = [] while True: time.sleep(interval) try: mem_usage_mapping = {} live = cgroup_manager.get_cgroup_containers() for item in live: try: last_live.remove(item) except: pass try: cgroup_manager.protect_container_oom(item) sample = cgroup_manager.get_container_sample(item) mem_usage_mapping[item] = 
math.ceil(sample['mem_page_sample'] * 1e-6) billing_manager.add_usage_sample(item, sample, interval) except: pass for item in last_live: billing_manager.clean_dead_node(item) last_live = live is_ready = True memory_available = system_manager.get_available_memsw() if memory_available['Mbytes'] <= 0: size_in_gb = int(math.ceil(-memory_available['Mbytes'] / 1024 / 16) * 16) print("[warning]", 'overloaded containers, auto-extending %d G memsw.' % size_in_gb) system_manager.extend_swap(size_in_gb) total_score = 0.0 score_mapping = {} for item in live: score = max(1e-8, smart_controller.policy.get_score_by_uuid(item)) score_mapping[item] = score print(item, "(score/cpu)", score) total_score += score # CPU Scoring for item in live: ceof = score_mapping[item] / total_score cgroup_manager.set_container_cpu_priority_limit(item, ceof) # Iterative Memory Scoring free_mem = system_manager.get_total_physical_memory_for_containers()['Mbytes'] local_nodes = live mem_alloc = {} for item in live: mem_alloc[item] = 0 while free_mem > 0 and len(local_nodes) > 0: excess_mem = 0 next_local_nodes = [] for item in local_nodes: mem_alloc[item] += int(math.floor(free_mem * score_mapping[item] / total_score)) if mem_alloc[item] >= mem_usage_mapping[item]: excess_mem += mem_alloc[item] - mem_usage_mapping[item] mem_alloc[item] = mem_usage_mapping[item] else: next_local_nodes.append(item) free_mem = excess_mem local_nodes = next_local_nodes for item in live: mem_alloc[item] += int(math.floor(free_mem * score_mapping[item] / total_score)) cgroup_manager.set_container_physical_memory_limit(item, mem_alloc[item]) print(item, "(malloc:usage)", mem_alloc[item], mem_usage_mapping[item]) if len(live) > 0: print("-------------------------------") except: pass # echo "8:0 1000" > /sys/fs/cgroup/blkio/lxc/docklet-1-0/blkio.throttle.write_bps_device # https://www.kernel.org/doc/Documentation/devices.txt # while true; do clear; cat /sys/fs/cgroup/blkio/lxc/docklet-1-0/blkio.throttle.io_service_bytes; 
class system_manager:
    # Directory where per-container usage records are persisted.
    db_prefix = '.'

    def set_db_prefix(prefix):
        system_manager.db_prefix = prefix
        try:
            os.makedirs(prefix)
        except:
            pass

    def clear_all_swaps():
        # Drop every swap device and detach all loop devices.
        subprocess.getoutput('swapoff -a')
        subprocess.getoutput('losetup -D')

    def extend_swap(size):
        # Create a sparse file of `size` GB, expose it through a fresh
        # /dev/cg-loop-<n> block device and enable it as swap.
        # A negative size means "auto": total RAM plus 1/8, converted to GB.
        if size < 0:
            (mem_free, mem_total) = system_manager.get_memory_sample()
            size = (mem_total + mem_total // 8) // 1024
        # Pick the first loop-device id >= 128 not already used for swap.
        nid = 128
        while subprocess.getoutput("cat /proc/swaps | grep cg-loop | awk '{print $1}' | awk -F\- '{print $NF}' | grep %d$" % nid) != "":
            nid = nid + 1
        start_time = time.time()
        # setup
        os.system('dd if=/dev/zero of=/tmp/cg-swap-%d bs=1G count=0 seek=%d >/dev/null 2>&1' % (nid, size))
        os.system('mknod -m 0660 /dev/cg-loop-%d b 7 %d >/dev/null 2>&1' % (nid, nid))
        os.system('losetup /dev/cg-loop-%d /tmp/cg-swap-%d >/dev/null 2>&1' % (nid, nid))
        os.system('mkswap /dev/cg-loop-%d >/dev/null 2>&1' % nid)
        success = os.system('swapon /dev/cg-loop-%d >/dev/null 2>&1' % nid) == 0
        # detach
        # os.system('swapoff /dev/cg-loop-%d >/dev/null 2>&1' % nid)
        # os.system('losetup -d /dev/cg-loop-%d >/dev/null 2>&1' % nid)
        # os.system('rm -f /dev/cg-loop-%d /tmp/cg-swap-%d >/dev/null 2>&1' % (nid, nid))
        end_time = time.time()
        return {"setup": success, "time": end_time - start_time }

    def get_cpu_sample():
        # Sum of busy jiffies from /proc/stat fields $2,$3,$4,$6
        # (presumably user, nice, system, iowait -- idle is excluded),
        # paired with the wall-clock timestamp of the sample.
        [a, b, c, d] = subprocess.getoutput("cat /proc/stat | grep ^cpu\ | awk '{print $2, $3, $4, $6}'").split()
        cpu_time = int(a) + int(b) + int(c) + int(d)
        return (cpu_time, time.time())

    def get_memory_sample():
        # (available, total) physical memory in MBytes.
        mem_free = int(subprocess.getoutput("awk '{if ($1==\"MemAvailable:\") print $2}' /proc/meminfo 2>/dev/null")) // 1024
        mem_total = int(subprocess.getoutput("awk '{if ($1==\"MemTotal:\") print $2}' /proc/meminfo 2>/dev/null")) // 1024
        return (mem_free, mem_total)

    def get_swap_sample():
        # (free, total) swap in MBytes.
        swap_free = int(subprocess.getoutput("awk '{if ($1==\"SwapFree:\") print $2}' /proc/meminfo 2>/dev/null")) // 1024
        swap_total = int(subprocess.getoutput("awk '{if ($1==\"SwapTotal:\") print $2}' /proc/meminfo 2>/dev/null")) // 1024
        return (swap_free, swap_total)

    def get_system_loads():
        # Snapshot of memory/swap (MB) plus an estimate of idle cpus derived
        # from the delta against the previous cpu sample. The 0.01 factor
        # assumes 100 Hz jiffies -- TODO confirm on the target kernel.
        if 'last_cpu_sample' not in system_manager.__dict__:
            system_manager.last_cpu_sample = system_manager.get_cpu_sample()
            time.sleep(1)
        cpu_sample = system_manager.get_cpu_sample()
        (mem_free, mem_total) = system_manager.get_memory_sample()
        (swap_free, swap_total) = system_manager.get_swap_sample()
        ncpus = int(subprocess.getoutput("grep processor /proc/cpuinfo | wc -l"))
        cpu_free = ncpus - (cpu_sample[0] - system_manager.last_cpu_sample[0]) * 0.01 / (cpu_sample[1] - system_manager.last_cpu_sample[1])
        cpu_free = 0.0 if cpu_free <= 0.0 else cpu_free
        system_manager.last_cpu_sample = cpu_sample
        return {"mem_free": mem_free, "mem_total": mem_total,
                "swap_free": swap_free, "swap_total": swap_total,
                "cpu_free": cpu_free, "cpu_total": ncpus }

    def get_proc_etime(pid):
        # Parse `ps -o etime` output ([[dd-]hh:]mm:ss) into seconds;
        # returns -1 when the process no longer exists.
        fmt = subprocess.getoutput("ps -A -opid,etime | grep '^ *%d' | awk '{print $NF}'" % pid).strip()
        if fmt == '':
            return -1
        parts = fmt.split('-')
        days = int(parts[0]) if len(parts) == 2 else 0
        fmt = parts[-1]
        parts = fmt.split(':')
        hours = int(parts[0]) if len(parts) == 3 else 0
        parts = parts[len(parts)-2:]
        minutes = int(parts[0])
        seconds = int(parts[1])
        return ((days * 24 + hours) * 60 + minutes) * 60 + seconds

    def get_available_memsw():
        # Free mem+swap (MB) after subtracting what running containers could
        # still claim (their memsw quotas minus what they already use).
        total_mem_limit = 0
        total_mem_used = 0
        sysloads = system_manager.get_system_loads()
        live = cgroup_manager.get_cgroup_containers()
        for item in live:
            try:
                sample = cgroup_manager.get_container_sample(item)
                limit = cgroup_manager.get_container_limit(item)
                total_mem_limit += limit["mem_page_quota"]
                total_mem_used += sample["mem_page_sample"]
            except:
                # Container may disappear between listing and sampling.
                pass
        # Convert bytes to MB; usage is rounded up.
        total_mem_limit >>= 20
        total_mem_used = (total_mem_used + (1<<20) - 1) >> 20
        available_mem_resource = sysloads['mem_free'] + \
            sysloads['swap_free'] - total_mem_limit + total_mem_used
        return {"Mbytes": available_mem_resource, "physical": sysloads['mem_free'],
                "cpu_free": sysloads['cpu_free']}

    def get_total_physical_memory_for_containers():
        # Physical memory (MB) containers may share: currently free plus
        # what live containers already occupy.
        total_mem_used = 0
        sysloads = system_manager.get_system_loads()
        live = cgroup_manager.get_cgroup_containers()
        for item in live:
            try:
                sample = cgroup_manager.get_container_sample(item)
                total_mem_used += sample["mem_page_sample"]
            except:
                pass
        total_mem_used = (total_mem_used + (1<<20) - 1) >> 20
        total_physical_memory_for_containers = sysloads['mem_free'] + total_mem_used
        return {"Mbytes": total_physical_memory_for_containers}
class candidates_selector:
    """Selection policy for picking one minion among resource candidates."""

    def select(candidates):
        """Return the candidate address with the highest reported 'cpu_free'.

        `candidates` maps address -> resource dict; raises ValueError when
        the mapping is empty (mirrors max() semantics).
        """
        def free_cpu(addr):
            return candidates[addr]['cpu_free']
        return max(candidates, key=free_cpu)
class user_state_policy(identify_policy):
    """Scores containers whose owner is currently logged in 10x higher."""

    def get_score_by_uuid(uuid):
        # Container uuid is "<user>-<...>"; the master marks live users by
        # writing 'live' into their status file.
        owner = uuid.split('-')[0]
        status = subprocess.getoutput('cat /var/lib/docklet/global/users/%s/status 2>/dev/null' % owner)
        if status == 'live':
            return 10.0
        return 1.0
> /dev/stderr exit 1 fi # install packages that docklet needs (in ubuntu) # some packages' name maybe different in debian apt-get install -y lxc lxcfs lxc-templates lvm2 bridge-utils curl exim4 openssh-server openvswitch-switch apt-get install -y python3 python3-netifaces python3-flask python3-flask-sqlalchemy python3-pampy python3-httplib2 python3-pip apt-get install -y python3-psutil python3-flask-migrate python3-paramiko apt-get install -y python3-lxc apt-get install -y python3-requests python3-suds apt-get install -y nodejs npm apt-get install -y etcd apt-get install -y glusterfs-client attr apt-get install -y nginx pip3 install Flask-WTF apt-get install -y gdebi-core pip3 install grpcio grpcio-tools googleapis-common-protos #add ip forward echo "net.ipv4.ip_forward=1" >>/etc/sysctl.conf sysctl -p # check cgroup control #which cgm &> /dev/null || { echo "FAILED : cgmanager is required, please install cgmanager" && exit 1; } #cpucontrol=$(cgm listkeys cpu) #[[ -z $(echo $cpucontrol | grep cfs_quota_us) ]] && echo "FAILED : cpu.cfs_quota_us of cgroup is not supported, you may need to recompile kernel" && exit 1 #memcontrol=$(cgm listkeys memory) #if [[ -z $(echo $memcontrol | grep limit_in_bytes) ]]; then # echo "FAILED : memory.limit_in_bytes of cgroup is not supported" # echo "Try : " # echo -e " echo 'GRUB_CMDLINE_LINUX=\"cgroup_enable=memory swapaccount=1\"' >> /etc/default/grub; update-grub; reboot" > /dev/stderr # echo "Info : if not success, you may need to recompile kernel" # exit 1 #fi # check and install configurable-http-proxy which configurable-http-proxy &>/dev/null || { npm config set registry https://registry.npm.taobao.org && npm install -g configurable-http-proxy; } which configurable-http-proxy &>/dev/null || { echo "Error: install configurable-http-proxy failed, you should try again" && exit 1; } echo "" [[ -f conf/docklet.conf ]] || { echo "Generating docklet.conf from template" && cp conf/docklet.conf.template conf/docklet.conf; } [[ -f 
web/templates/home.html ]] || { echo "Generating HomePage from home.template" && cp web/templates/home.template web/templates/home.html; } FS_PREFIX=/opt/docklet . conf/docklet.conf export FS_PREFIX mkdir -p $FS_PREFIX/global mkdir -p $FS_PREFIX/local/ echo "directory FS_PREFIX (${FS_PREFIX}) have been created" if [[ ! -d $FS_PREFIX/local/basefs && ! $1 = "withoutfs" ]]; then mkdir -p $FS_PREFIX/local/basefs echo "Generating basefs" # wget -P $FS_PREFIX/local http://iwork.pku.edu.cn:1616/basefs-0.11.tar.bz2 && tar xvf $FS_PREFIX/local/basefs-0.11.tar.bz2 -C $FS_PREFIX/local/ > /dev/null [ $? != "0" ] && echo "Generate basefs failed, please download it from http://unias.github.io/docklet/download to FS_PREFIX/local and then extract it using root. (defalut FS_PRERIX is /opt/docklet)" fi echo "Some packagefs can be downloaded from http://unias.github.io/docklet.download" echo "you can download the packagefs and extract it to FS_PREFIX/local using root. (default FS_PREFIX is /opt/docklet" echo "" echo "All preparation installations are done." echo "****************************************" echo "* Please Read Lines Below Before Start *" echo "****************************************" echo "" echo "you may want to custom home page of docklet. Please modify web/templates/home.html" echo "Next, make sure exim4 can deliver mail out. To enable, run:" echo "dpkg-reconfigure exim4-config" echo "select internet site" echo "" echo "Then start docklet as described in README.md" ================================================ FILE: src/master/beansapplicationmgr.py ================================================ #!/usr/bin/python3 ''' This module consists of three parts: 1.send_beans_email: a function to send email to remind users of their beans. 2.ApplicationMgr: a class that will deal with users' requests about beans application. 3.ApprovalRobot: a automatic robot to examine and approve users' applications. 
''' import threading,datetime,random,time from utils.model import db,User,ApplyMsg from master.userManager import administration_required from utils import env import smtplib from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.header import Header from master.settings import settings # send email to remind users of their beans def send_beans_email(to_address, username, beans): email_from_address = settings.get('EMAIL_FROM_ADDRESS') if (email_from_address in ['\'\'', '\"\"', '']): return #text = 'Dear '+ username + ':\n' + ' Your beans in docklet are less than' + beans + '.' text = '

Dear '+ username + ':

' text += '''

      Your beans in docklet are %d now.

      If your beans are less than or equal to 0, all your worksapces will be stopped.

      Please apply for more beans to keep your workspaces running by following link:

      %s/beans/application/


      Note: DO NOT reply to this email!



Docklet Team, SEI, PKU

''' % (env.getenv("PORTAL_URL"), beans, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL")) text += '

'+ str(datetime.datetime.now()) + '

' text += '' subject = 'Docklet beans alert' msg = MIMEMultipart() textmsg = MIMEText(text,'html','utf-8') msg['Subject'] = Header(subject, 'utf-8') msg['From'] = email_from_address msg['To'] = to_address msg.attach(textmsg) s = smtplib.SMTP() s.connect() s.sendmail(email_from_address, to_address, msg.as_string()) s.close() # a class that will deal with users' requests about beans application. class ApplicationMgr: def __init__(self): # create database try: ApplyMsg.query.all() except: db.create_all() # user apply for beans def apply(self,username,number,reason): user = User.query.filter_by(username=username).first() if user is not None and user.beans >= 1000: return [False, "Your beans must be less than 1000."] if int(number) < 100 or int(number) > 5000: return [False, "Number field must be between 100 and 5000!"] applymsgs = ApplyMsg.query.filter_by(username=username).all() lasti = len(applymsgs) - 1 # the last index, the last application is also the latest application. if lasti >= 0 and applymsgs[lasti].status == "Processing": return [False, "You already have a processing application, please be patient."] # store the application into the database applymsg = ApplyMsg(username,number,reason) db.session.add(applymsg) db.session.commit() return [True,""] # get all applications of a user def query(self,username): applymsgs = ApplyMsg.query.filter_by(username=username).all() ans = [] for msg in applymsgs: ans.append(msg.ch2dict()) return ans # get all unread applications @administration_required def queryUnRead(self,*,cur_user): applymsgs = ApplyMsg.query.filter_by(status="Processing").all() ans = [] for msg in applymsgs: ans.append(msg.ch2dict()) return {"success":"true","applymsgs":ans} # agree an application @administration_required def agree(self,msgid,*,cur_user): applymsg = ApplyMsg.query.get(msgid) if applymsg is None: return {"success":"false","message":"Application doesn\'t exist."} applymsg.status = "Agreed" user = 
# a automatic robot to examine and approve users' applications.
class ApprovalRobot(threading.Thread):
    """Background thread that auto-approves long-pending beans applications.

    Any application in "Processing" state for longer than `maxtime` seconds
    is marked "Agreed" and the owner's beans are increased accordingly.
    """

    def __init__(self, maxtime=3600):
        threading.Thread.__init__(self)
        # The original assigned self.stop = False, which shadowed the stop()
        # method and made it uncallable (TypeError: 'bool' object is not
        # callable); keep the flag in a private attribute instead.
        self._stop = False
        self.interval = 20  # seconds between polling rounds
        self.maxtime = maxtime  # The max time that users may wait for from 'processing' to 'agreed'

    def stop(self):
        """Ask the robot loop to exit after the current round."""
        self._stop = True

    def run(self):
        while not self._stop:
            # query all processing applications
            applymsgs = ApplyMsg.query.filter_by(status="Processing").all()
            for msg in applymsgs:
                # total_seconds() instead of .seconds: the latter wraps at
                # one day and would indefinitely delay old applications.
                secs = (datetime.datetime.now() - msg.time).total_seconds()
                #ranint = random.randint(self.interval,self.maxtime)
                if secs >= self.maxtime:
                    msg.status = "Agreed"
                    user = User.query.filter_by(username=msg.username).first()
                    if user is not None:
                        # update users'beans
                        user.beans += msg.number
                    db.session.commit()
            time.sleep(self.interval)
nulladdr or admin_email_address in nulladdr):
        # sender or recipient not configured -> silently refuse to mail
        return {'success': 'false'}
    #text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
    # NOTE(review): the HTML markup of this message body was stripped by the
    # source extraction; the string literals below are garbled as a result.
    # Restore from the repository original before editing.
    text = '

Dear '+ 'admin' + ':

' text += '''

      A bug has been report by %s.


  %s  

      Please check it !



Docklet Team, SEI, PKU

''' % (username, bugmessage) text += '

'+ str(datetime.utcnow()) + '

' text += ''
    subject = 'A bug of Docklet has been reported'
    # ADMIN_EMAIL_ADDRESS may be wrapped in double quotes in the config file;
    # strip them before splitting the space-separated recipient list
    if admin_email_address[0] == '"':
        admins_addr = admin_email_address[1:-1].split(" ")
    else:
        admins_addr = admin_email_address.split(" ")
    # build the comma-separated To: header from the recipient list
    alladdr=""
    for addr in admins_addr:
        alladdr = alladdr+addr+", "
    alladdr=alladdr[:-2]
    msg = MIMEMultipart()
    textmsg = MIMEText(text,'html','utf-8')
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = email_from_address
    msg['To'] = alladdr
    msg.attach(textmsg)
    # SMTP() with no host + connect() with no args talks to localhost:25
    s = smtplib.SMTP()
    s.connect()
    try:
        s.sendmail(email_from_address, admins_addr, msg.as_string())
    except Exception as e:
        # best-effort delivery: log and still report success to the caller
        logger.error(e)
    s.close()
    return {'success':'true'}

================================================ FILE: src/master/cloudmgr.py ================================================
#!/usr/bin/python3
from io import StringIO
import os,sys,subprocess,time,re,datetime,threading,random,shutil
from utils.model import db, Image
from master.deploy import *
import json
from utils.log import logger
from utils import env
import requests

fspath = env.getenv('FS_PREFIX')

class AliyunMgr():
    # Scale-out backend that rents ECS worker instances from Aliyun.
    def __init__(self):
        # Import the Aliyun SDK lazily via __import__ so the master can start
        # even when the SDK is absent (scale-out disabled).
        self.AcsClient = __import__('aliyunsdkcore.client', fromlist=["AcsClient"])
        self.Request = __import__('aliyunsdkecs.request.v20140526', fromlist=[
            "CreateInstanceRequest", "StopInstanceRequest", "DescribeInstancesRequest",
            "DeleteInstanceRequest", "StartInstanceRequest", "DescribeInstancesRequest",
            "AllocateEipAddressRequest", "AssociateEipAddressRequest"])

    def loadClient(self):
        # Load cloudsetting.json (seeding it from the template on first run)
        # and build the authenticated API client.  Returns True on success.
        if not os.path.exists(fspath+"/global/sys/cloudsetting.json"):
            currentfilepath = os.path.dirname(os.path.abspath(__file__))
            templatefilepath = currentfilepath + "/../tools/cloudsetting.aliyun.template.json"
            shutil.copyfile(templatefilepath,fspath+"/global/sys/cloudsetting.json")
            logger.error("please modify the setting file first")
            return False
        try:
            settingfile = open(fspath+"/global/sys/cloudsetting.json", 'r')
            self.setting = json.loads(settingfile.read())
            settingfile.close()
            self.clt = \
self.AcsClient.AcsClient(self.setting['AccessKeyId'],self.setting['AccessKeySecret'], self.setting['RegionId'])
            logger.info("load CLT of Aliyun success")
            return True
        except Exception as e:
            logger.error(e)
            return False

    def createInstance(self):
        """Create one temporary ECS worker instance; return its InstanceId."""
        request = self.Request.CreateInstanceRequest.CreateInstanceRequest()
        request.set_accept_format('json')
        request.add_query_param('RegionId', self.setting['RegionId'])
        # ZoneId / VSwitchId are optional in the setting file
        if 'ZoneId' in self.setting and not self.setting['ZoneId'] == "":
            request.add_query_param('ZoneId', self.setting['ZoneId'])
        if 'VSwitchId' in self.setting and not self.setting['VSwitchId'] == "":
            request.add_query_param('VSwitchId', self.setting['VSwitchId'])
        # hard-coded Ubuntu 16.04 base image for the rented worker
        request.add_query_param('ImageId', 'ubuntu_16_0402_64_20G_alibase_20170818.vhd')
        request.add_query_param('InternetMaxBandwidthOut', 1)
        request.add_query_param('InstanceName', 'docklet_tmp_worker')
        request.add_query_param('HostName', 'worker-tmp')
        request.add_query_param('SystemDisk.Size', int(self.setting['SystemDisk.Size']))
        request.add_query_param('InstanceType', self.setting['InstanceType'])
        request.add_query_param('Password', self.setting['Password'])
        response = self.clt.do_action_with_exception(request)
        logger.info(response)
        # response is bytes; decode and parse the JSON payload
        instanceid=json.loads(bytes.decode(response))['InstanceId']
        return instanceid

    def startInstance(self, instanceid):
        """Start the (already created) instance identified by `instanceid`."""
        request = self.Request.StartInstanceRequest.StartInstanceRequest()
        request.set_accept_format('json')
        request.add_query_param('InstanceId', instanceid)
        response = self.clt.do_action_with_exception(request)
        logger.info(response)

    def createEIP(self):
        """Allocate an elastic IP; return [AllocationId, address]."""
        request = self.Request.AllocateEipAddressRequest.AllocateEipAddressRequest()
        request.set_accept_format('json')
        request.add_query_param('RegionId', self.setting['RegionId'])
        response = self.clt.do_action_with_exception(request)
        logger.info(response)
        response=json.loads(bytes.decode(response))
        eipid=response['AllocationId']
        eipaddr=response['EipAddress']
        return [eipid, eipaddr]

    def associateEIP(self, instanceid, eipid):
        """Bind the elastic IP `eipid` to instance `instanceid`."""
        request = self.Request.AssociateEipAddressRequest.AssociateEipAddressRequest()
        request.set_accept_format('json')
        request.add_query_param('AllocationId', eipid)
        request.add_query_param('InstanceId', instanceid)
        response = self.clt.do_action_with_exception(request)
        logger.info(response)

    def getInnerIP(self, instanceid):
        """Return the private (VPC) IP of `instanceid`.

        Falls back to the first instance's VpcAttributes address when the
        id is not found in the DescribeInstances result.
        """
        request = self.Request.DescribeInstancesRequest.DescribeInstancesRequest()
        request.set_accept_format('json')
        response = self.clt.do_action_with_exception(request)
        instances = json.loads(bytes.decode(response))['Instances']['Instance']
        for instance in instances:
            if instance['InstanceId'] == instanceid:
                return instance['NetworkInterfaces']['NetworkInterface'][0]['PrimaryIpAddress']
        return json.loads(bytes.decode(response))['Instances']['Instance'][0]['VpcAttributes']['PrivateIpAddress']['IpAddress'][0]

    def isStarted(self, instanceids):
        """True when every listed instance reports Status == "Running".

        NOTE(review): an instance missing from the DescribeInstances page is
        silently treated as running — confirm paging is not an issue here.
        """
        request = self.Request.DescribeInstancesRequest.DescribeInstancesRequest()
        request.set_accept_format('json')
        response = self.clt.do_action_with_exception(request)
        instances = json.loads(bytes.decode(response))['Instances']['Instance']
        for instance in instances:
            if instance['InstanceId'] in instanceids:
                if not instance['Status'] == "Running":
                    return False
        return True

    def rentServers(self,number):
        """Rent `number` instances with public EIPs and wait until they run.

        Returns [masterip, eipaddrs]; masterip is derived from the ETCD env
        var.  The fixed sleeps pace the Aliyun API (rate limiting).
        """
        instanceids=[]
        eipids=[]
        eipaddrs=[]
        for i in range(int(number)):
            instanceids.append(self.createInstance())
            time.sleep(2)
        time.sleep(10)
        for i in range(int(number)):
            [eipid,eipaddr]=self.createEIP()
            eipids.append(eipid)
            eipaddrs.append(eipaddr)
            time.sleep(2)
        masterip=env.getenv('ETCD').split(':')[0]
        for i in range(int(number)):
            self.associateEIP(instanceids[i],eipids[i])
            time.sleep(2)
        time.sleep(5)
        for instanceid in instanceids:
            self.startInstance(instanceid)
            time.sleep(2)
        time.sleep(10)
        # poll until all instances report Running
        while not self.isStarted(instanceids):
            time.sleep(10)
        time.sleep(5)
        return [masterip, eipaddrs]

    def addNode(self):
        """Rent one server and run the deploy script on it (blocking)."""
        if not self.loadClient():
            return {'success':'false'}
        [masterip, eipaddrs] = self.rentServers(1)
        threads = []
        for eip in eipaddrs:
            # `deploy` comes from `from master.deploy import *` above
            thread = threading.Thread(target = deploy, args=(eip,masterip,'root',self.setting['Password'],self.setting['VolumeName']))
            thread.setDaemon(True)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        return {'success':'true'}

    def addNodeAsync(self):
        # Fire-and-forget variant of addNode for the HTTP handler.
        thread = threading.Thread(target = self.addNode)
        thread.setDaemon(True)
        thread.start()

class EmptyMgr():
    # Null backend used when ALLOW_SCALE_OUT is disabled.
    def addNodeAsync(self):
        logger.error("current cluster does not support scale out")
        return False

class CloudMgr():
    # Facade selecting the scale-out engine and managing cloudsetting.json.
    def getSettingFile(self):
        """Return the raw cloudsetting.json text (seeding from the template if absent)."""
        if not os.path.exists(fspath+"/global/sys/cloudsetting.json"):
            currentfilepath = os.path.dirname(os.path.abspath(__file__))
            templatefilepath = currentfilepath + "/../tools/cloudsetting.aliyun.template.json"
            shutil.copyfile(templatefilepath,fspath+"/global/sys/cloudsetting.json")
        settingfile = open(fspath+"/global/sys/cloudsetting.json", 'r')
        setting = settingfile.read()
        settingfile.close()
        return {'success':'true', 'result':setting}

    def modifySettingFile(self, setting):
        """Overwrite cloudsetting.json with `setting` (a JSON string)."""
        if setting == None:
            logger.error("setting is None")
            return {'success':'false'}
        settingfile = open(fspath+"/global/sys/cloudsetting.json", 'w')
        settingfile.write(setting)
        settingfile.close()
        return {'success':'true'}

    def __init__(self):
        if env.getenv("ALLOW_SCALE_OUT") == "True":
            self.engine = AliyunMgr()
        else:
            self.engine = EmptyMgr()

================================================ FILE: src/master/deploy.py ================================================
#!/usr/bin/python3
import paramiko, time, os
from utils.log import logger
from utils import env

def myexec(ssh,command):
    """Run `command` over ssh and poll until its output channel reaches EOF,
    giving up (and logging) after one hour."""
    stdin,stdout,stderr = ssh.exec_command(command)
    endtime = time.time() + 3600
    while not stdout.channel.eof_received:
        time.sleep(2)
        if time.time() > endtime:
            stdout.channel.close()
            logger.error(command + ": fail")
            return
    # for line in stdout.readlines():
    #     if line is None:
    #         time.sleep(5)
    #     else:
    #         print(line)

def deploy(ipaddr,masterip,account,password,volumename):
    """Copy the deploy script to a freshly rented host and execute it.

    Retries the SSH/SFTP connections forever (2s backoff) because the host
    may still be booting when this is called.
    """
    while True:
        try:
            transport = paramiko.Transport((ipaddr,22))
transport.connect(username=account,password=password)
            break
        except Exception as e:
            # host may still be booting; retry until SSH is reachable
            time.sleep(2)
            pass
    sftp = paramiko.SFTPClient.from_transport(transport)
    currentfilepath = os.path.dirname(os.path.abspath(__file__))
    deployscriptpath = currentfilepath + "/../tools/docklet-deploy.sh"
    sftp.put(deployscriptpath,'/root/docklet-deploy.sh')
    # replicate the master's /etc/hosts so the worker can resolve peers
    sftp.put('/etc/hosts', '/etc/hosts')
    transport.close()
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    while True:
        try:
            ssh.connect(ipaddr, username = account, password = password, timeout = 300)
            break
        except Exception as e:
            time.sleep(2)
            pass
    # substitute placeholders in the script, then run it
    myexec(ssh,"sed -i 's/%MASTERIP%/" + masterip + "/g' /root/docklet-deploy.sh")
    myexec(ssh,"sed -i 's/%VOLUMENAME%/" + volumename + "/g' /root/docklet-deploy.sh")
    myexec(ssh,'chmod +x /root/docklet-deploy.sh')
    myexec(ssh,'/root/docklet-deploy.sh')
    ssh.close()
    return

================================================ FILE: src/master/httprest.py ================================================
#!/usr/bin/python3
# load environment variables in the beginning
# because some modules need variables when import
# for example, userManager/model.py
import sys
if sys.path[0].endswith("master"):
    sys.path[0] = sys.path[0][:-6]
from flask import Flask, request
# must first init loadenv
from utils import tools, env
# default CONFIG=/opt/docklet/local/docklet-running.conf
config = env.getenv("CONFIG")
tools.loadenv(config)
# second init logging
# must import logger after initlogging, ugly
from utils.log import initlogging
initlogging("docklet-master")
from utils.log import logger

import os
import http.server, cgi, json, sys, shutil, traceback
import xmlrpc.client
from socketserver import ThreadingMixIn
from utils import etcdlib, imagemgr
from master import nodemgr, vclustermgr, notificationmgr, lockmgr, cloudmgr, jobmgr, taskmgr
from utils.logs import logs
from master import userManager, beansapplicationmgr, monitor, sysmgr, network, releasemgr
from worker.monitor import \
History_Manager
import threading
import requests
from utils.nettools import portcontrol

#default EXTERNAL_LOGIN=False
external_login = env.getenv('EXTERNAL_LOGIN')
if (external_login == 'TRUE'):
    from userDependence import external_auth

# base URL of the separate user-management service
userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))
G_userip = env.getenv("USER_IP")

# NOTE(review): mutable default argument `data={}` — harmless here because it
# is never mutated, but `data=None` would be the safe idiom.
def post_to_user(url = '/', data={}):
    """POST `data` to the user service and return the parsed JSON reply."""
    return requests.post(userpoint+url,data=data).json()

app = Flask(__name__)

from functools import wraps

def login_required(func):
    """Decorator: resolve the form token via the user service and call the
    view as func(username, beans, request.form, ...)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        logger.info ("get request, path: %s" % request.path)
        token = request.form.get("token", None)
        if (token == None):
            logger.info ("get request without token, path: %s" % request.path)
            return json.dumps({'success':'false', 'message':'user or key is null'})
        result = post_to_user("/authtoken/", {'token':token})
        if result.get('success') == 'true':
            username = result.get('username')
            beans = result.get('beans')
        else:
            # NOTE(review): returns the dict itself, not json.dumps(result),
            # unlike every other error path — confirm Flask version handles it.
            return result
        #if (cur_user == None):
        #    return json.dumps({'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'})
        return func(username, beans, request.form, *args, **kwargs)
    return wrapper

def auth_key_required(func):
    """Decorator: require the shared AUTH_KEY in the form (service-to-service)."""
    @wraps(func)
    def wrapper(*args,**kwargs):
        key_1 = env.getenv('AUTH_KEY')
        key_2 = request.form.get("auth_key",None)
        #logger.info(str(ip) + " " + str(G_userip))
        if key_2 is not None and key_1 == key_2:
            return func(*args, **kwargs)
        else:
            return json.dumps({'success':'false','message': 'auth_key is required!'})
    return wrapper

def beans_check(func):
    """Decorator (after login_required): refuse users whose beans <= 0.
    Relies on the view signature placing `beans` at args[1]."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        beans = args[1]
        if beans <= 0:
            return json.dumps({'success':'false','message':'user\'s beans are less than or equal to zero!'})
        else:
            return func(*args, **kwargs)
    return wrapper

@app.route("/isalive/", methods = ['POST'])
@login_required
def isalive(user, beans, form):
    # liveness probe: token validity is the only check
    return json.dumps({'success':'true'})

@app.route("/logs/list/", methods=['POST'])
@login_required
def logs_list(user,
beans, form):
    # List log files visible to the caller's user group.
    user_group = post_to_user('/user/selfQuery/', {'token': request.form.get("token", None)}).get('data', None).get('group', None)
    return json.dumps(logs.list(user_group = user_group))

@app.route("/logs/get/", methods=['POST'])
@login_required
def logs_get(user, beans, form):
    # Fetch one log file by name, scoped to the caller's user group.
    user_group = post_to_user('/user/selfQuery/', {'token': request.form.get("token", None)}).get('data', None).get('group', None)
    return json.dumps(logs.get(user_group = user_group, filename = form.get('filename', '')))

@app.route("/cluster/create/", methods=['POST'])
@login_required
@beans_check
def create_cluster(user, beans, form):
    """Create a vcluster for `user` under the per-user lock.

    Charges the cpu/memory/disk quota first (usageInc) and rolls it back
    (usageRecover) if cluster creation fails.
    """
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        image = {}
        image['name'] = form.get("imagename", None)
        image['type'] = form.get("imagetype", None)
        image['owner'] = form.get("imageowner", None)
        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        user_info = json.dumps(user_info)
        logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name']))
        setting = {
            'cpu': form.get('cpuSetting'),
            'memory': form.get('memorySetting'),
            'disk': form.get('diskSetting')
        }
        res = post_to_user("/user/usageInc/", {'token':form.get('token'), 'setting':json.dumps(setting)})
        status = res.get('success')
        result = res.get('result')
        # NOTE(review): `success` is the string 'true'/'false' elsewhere in
        # this file (see login_required), so `not status` is never True and a
        # failed usageInc is not detected here — confirm and compare to 'true'.
        if not status:
            return json.dumps({'success':'false', 'action':'create cluster', 'message':result})
        [status, result] = G_vclustermgr.create_cluster(clustername, user, image, user_info, setting)
        if status:
            return json.dumps({'success':'true', 'action':'create cluster', 'message':result})
        else:
            # creation failed: give the charged quota back
            post_to_user("/user/usageRecover/", {'token':form.get('token'), 'setting':json.dumps(setting)})
            return json.dumps({'success':'false', 'action':'create cluster', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return \
json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/scaleout/", methods=['POST'])
@login_required
@beans_check
def scaleout_cluster(user, beans, form):
    """Add a container to an existing cluster; quota handling mirrors
    create_cluster (usageInc up front, usageRecover on failure)."""
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    logger.info ("scaleout: %s" % form)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        logger.info("handle request : scale out %s" % clustername)
        image = {}
        image['name'] = form.get("imagename", None)
        image['type'] = form.get("imagetype", None)
        image['owner'] = form.get("imageowner", None)
        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        user_info = json.dumps(user_info)
        setting = {
            'cpu': form.get('cpuSetting'),
            'memory': form.get('memorySetting'),
            'disk': form.get('diskSetting')
        }
        res = post_to_user("/user/usageInc/", {'token':form.get('token'), 'setting':json.dumps(setting)})
        status = res.get('success')
        result = res.get('result')
        # NOTE(review): same string-truthiness issue as create_cluster above.
        if not status:
            return json.dumps({'success':'false', 'action':'scale out', 'message': result})
        [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info, setting)
        if status:
            return json.dumps({'success':'true', 'action':'scale out', 'message':result})
        else:
            post_to_user("/user/usageRecover/", {'token':form.get('token'), 'setting':json.dumps(setting)})
            return json.dumps({'success':'false', 'action':'scale out', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/scalein/", methods=['POST'])
@login_required
def scalein_cluster(user, beans, form):
    """Remove one container from a cluster, releasing its quota first."""
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        logger.info("handle request : scale in %s" %
clustername)
        containername = form.get("containername", None)
        # look up the container's resource setting so it can be released
        [status, usage_info] = G_vclustermgr.get_clustersetting(clustername, user, containername, False)
        if status:
            post_to_user("/user/usageRelease/", {'token':form.get('token'), 'cpu':usage_info['cpu'], 'memory':usage_info['memory'],'disk':usage_info['disk']})
        [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername)
        if status:
            return json.dumps({'success':'true', 'action':'scale in', 'message':result})
        else:
            return json.dumps({'success':'false', 'action':'scale in', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/start/", methods=['POST'])
@login_required
@beans_check
def start_cluster(user, beans, form):
    """Start all containers of the named cluster (per-user lock held)."""
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        logger.info ("handle request : start cluster %s" % clustername)
        [status, result] = G_vclustermgr.start_cluster(clustername, user, user_info)
        if status:
            return json.dumps({'success':'true', 'action':'start cluster', 'message':result})
        else:
            return json.dumps({'success':'false', 'action':'start cluster', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/stop/", methods=['POST'])
@login_required
def stop_cluster(user, beans, form):
    """Stop all containers of the named cluster."""
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        logger.info ("handle request : stop cluster %s" % clustername)
        [status, result] = \
G_vclustermgr.stop_cluster(clustername, user)
        if status:
            return json.dumps({'success':'true', 'action':'stop cluster', 'message':result})
        else:
            return json.dumps({'success':'false', 'action':'stop cluster', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/delete/", methods=['POST'])
@login_required
def delete_cluster(user, beans, form):
    """Delete a whole cluster, releasing the quota of all its containers."""
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        logger.info ("handle request : delete cluster %s" % clustername)
        user_info = post_to_user("/user/selfQuery/" , {'token':form.get("token")})
        user_info = json.dumps(user_info)
        # "all" + True -> aggregate usage over every container in the cluster
        [status, usage_info] = G_vclustermgr.get_clustersetting(clustername, user, "all", True)
        if status:
            post_to_user("/user/usageRelease/", {'token':form.get('token'), 'cpu':usage_info['cpu'], 'memory':usage_info['memory'],'disk':usage_info['disk']})
        [status, result] = G_vclustermgr.delete_cluster(clustername, user, user_info)
        if status:
            return json.dumps({'success':'true', 'action':'delete cluster', 'message':result})
        else:
            return json.dumps({'success':'false', 'action':'delete cluster', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/info/", methods=['POST'])
@login_required
def info_cluster(user, beans, form):
    """Return metadata for one cluster (no lock needed: read-only)."""
    global G_vclustermgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    logger.info ("handle request : info cluster %s" % clustername)
    [status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
    if status:
        return json.dumps({'success':'true', 'action':'info cluster', 'message':result})
    else:
        return \
json.dumps({'success':'false', 'action':'info cluster', 'message':result})

@app.route("/cluster/list/", methods=['POST'])
@login_required
def list_cluster(user, beans, form):
    """List the caller's clusters."""
    global G_vclustermgr
    logger.info ("handle request : list clusters for %s" % user)
    [status, clusterlist] = G_vclustermgr.list_clusters(user)
    if status:
        return json.dumps({'success':'true', 'action':'list cluster', 'clusters':clusterlist})
    else:
        return json.dumps({'success':'false', 'action':'list cluster', 'message':clusterlist})

@app.route("/cluster/stopall/",methods=['POST'])
@auth_key_required
def stopall_cluster():
    """Service-to-service endpoint: stop every cluster of `username`."""
    global G_vclustermgr
    global G_ulockmgr
    user = request.form.get('username',None)
    if user is None:
        return json.dumps({'success':'false', 'message':'User is required!'})
    G_ulockmgr.acquire(user)
    try:
        logger.info ("handle request : stop all clusters for %s" % user)
        [status, clusterlist] = G_vclustermgr.list_clusters(user)
        if status:
            for cluster in clusterlist:
                G_vclustermgr.stop_cluster(cluster,user)
            return json.dumps({'success':'true', 'action':'stop all cluster'})
        else:
            return json.dumps({'success':'false', 'action':'stop all cluster', 'message':clusterlist})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/cluster/flush/", methods=['POST'])
@login_required
def flush_cluster(user, beans, form):
    """Propagate the state of container `from_lxc` to the rest of the cluster."""
    global G_vclustermgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    from_lxc = form.get('from_lxc', None)
    G_vclustermgr.flush_cluster(user,clustername,from_lxc)
    return json.dumps({'success':'true', 'action':'flush'})

@app.route("/cluster/save/", methods=['POST'])
@login_required
def save_cluster(user, beans, form):
    """Save a container of the cluster as a private image.

    Unless isforce == "true", refuses to overwrite an existing image name.
    """
    global G_vclustermgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    imagename = \
form.get("image", None)
    description = form.get("description", None)
    containername = form.get("containername", None)
    isforce = form.get("isforce", None)
    G_ulockmgr.acquire(user)
    try:
        if not isforce == "true":
            [status,message] = G_vclustermgr.image_check(user,imagename)
            if not status:
                return json.dumps({'success':'false','reason':'exists', 'message':message})
        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        # group image-quota limits how many images the user may keep
        [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"])
        if status:
            logger.info("image has been saved")
            return json.dumps({'success':'true', 'action':'save'})
        else:
            logger.debug(message)
            return json.dumps({'success':'false', 'reason':'exceed', 'message':message})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/admin/ulock/release/", methods=['POST'])
@login_required
def release_ulock(user, beans, form):
    """Root-only escape hatch to force-release another user's lock."""
    global G_ulockmgr
    if user != 'root':
        return json.dumps({'success':'false', 'message':'root is required.'})
    release_user = form.get("ulockname",None)
    if release_user is None:
        return json.dumps({'success':'false', 'message':'ulockname is required.'})
    try:
        G_ulockmgr.release(release_user)
    except Exception as e:
        logger.error(traceback.format_exc())
        return json.dumps({'success':'false', 'message':'fail to release lock %s' % release_user})
    return json.dumps({'success':'true', 'message':'lock %s release successfully' % release_user})

@app.route("/admin/migrate_cluster/", methods=['POST'])
@auth_key_required
def migrate_cluster():
    """Service-to-service endpoint: move a user's cluster to new hosts.

    Rebuilds user_info from group quota data because there is no login
    token in this flow.
    """
    global G_vclustermgr
    global G_ulockmgr
    user = request.form.get('username',None)
    if user is None:
        return json.dumps({'success':'false', 'message':'User is required!'})
    clustername = request.form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    new_hosts = \
request.form.get('new_hosts', None)
    if (new_hosts == None):
        return json.dumps({'success':'false', 'message':'new_hosts is null'})
    new_host_list = new_hosts.split(',')
    G_ulockmgr.acquire(user)
    auth_key = env.getenv('AUTH_KEY')
    try:
        logger.info ("handle request : migrate cluster to %s. user:%s clustername:%s" % (str(new_hosts), user, clustername))
        res = post_to_user("/master/user/groupinfo/", {'auth_key':auth_key})
        groups = json.loads(res['groups'])
        quotas = {}
        for group in groups:
            #logger.info(group)
            quotas[group['name']] = group['quotas']
        rc_info = post_to_user("/master/user/recoverinfo/", {'username':user,'auth_key':auth_key})
        groupname = rc_info['groupname']
        user_info = {"data":{"id":rc_info['uid'],"groupinfo":quotas[groupname]}}
        # NOTE(review): the first two %s placeholders read "user(...) cluster(...)"
        # but receive (clustername, user) — the log message prints them swapped.
        logger.info("Migrate cluster for user(%s) cluster(%s) to new_hosts(%s). user_info(%s)" %(clustername, user, str(new_host_list), user_info))
        [status,msg] = G_vclustermgr.migrate_cluster(clustername, user, new_host_list, user_info)
        if not status:
            logger.error(msg)
            return json.dumps({'success':'false', 'message': msg})
        return json.dumps({'success':'true', 'action':'migrate_container'})
    except Exception as ex:
        logger.error(traceback.format_exc())
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/host/migrate/", methods=['POST'])
@login_required
def migrate_host(user, beans, form):
    """Evacuate every cluster on src_host onto the given destination hosts."""
    global G_vclustermgr
    global G_ulockmgr
    src_host = request.form.get('src_host', None)
    dst_host_list = request.form.getlist('dst_host_list', None)
    if src_host is None or dst_host_list is None:
        return json.dumps({'success':'false', 'message': 'src host or dst host list is null'})
    [status, msg] = G_vclustermgr.migrate_host(src_host, dst_host_list, G_ulockmgr)
    if status:
        return json.dumps({'success': 'true', 'action': 'migrate_host'})
    else:
        return json.dumps({'success': 'false', 'message': msg})

@app.route("/image/list/", methods=['POST'])
@login_required
def list_image(user, beans, form):
    """List images visible to the caller."""
    global G_imagemgr
    images \
= G_imagemgr.list_images(user)
    return json.dumps({'success':'true', 'images': images})

@app.route("/image/updatebase/", methods=['POST'])
@login_required
def update_base(user, beans, form):
    """Refresh the shared base image used by the caller's clusters."""
    global G_imagemgr
    global G_vclustermgr
    [success, status] = G_imagemgr.update_base_image(user, G_vclustermgr, form.get('image'))
    return json.dumps({'success':'true', 'message':status})

@app.route("/image/description/", methods=['POST'])
@login_required
def description_image(user, beans, form):
    """Return the description text of one image."""
    global G_imagemgr
    image = {}
    image['name'] = form.get("imagename", None)
    image['type'] = form.get("imagetype", None)
    image['owner'] = form.get("imageowner", None)
    description = G_imagemgr.get_image_description(user,image)
    return json.dumps({'success':'true', 'message':description})

@app.route("/image/share/", methods=['POST'])
@login_required
def share_image(user, beans, form):
    """Make one of the caller's private images shared."""
    global G_imagemgr
    image = form.get('image')
    G_ulockmgr.acquire(user)
    try:
        G_imagemgr.shareImage(user,image)
        return json.dumps({'success':'true', 'action':'share'})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/image/unshare/", methods=['POST'])
@login_required
def unshare_image(user, beans, form):
    """Revoke sharing of one of the caller's images."""
    global G_imagemgr
    image = form.get('image', None)
    G_ulockmgr.acquire(user)
    try:
        G_imagemgr.unshareImage(user,image)
        return json.dumps({'success':'true', 'action':'unshare'})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)

@app.route("/image/delete/", methods=['POST'])
@login_required
def delete_image(user, beans, form):
    """Delete one of the caller's images."""
    global G_imagemgr
    image = form.get('image', None)
    G_ulockmgr.acquire(user)
    try:
        G_imagemgr.removeImage(user,image)
        return json.dumps({'success':'true', 'action':'delete'})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)
@app.route("/image/copy/", methods=['POST']) @login_required def copy_image(user, beans, form): global G_imagemgr global G_ulockmgr image = form.get('image', None) target = form.get('target',None) token = form.get('token',None) G_ulockmgr.acquire(user) try: res = G_imagemgr.copyImage(user,image,token,target) return json.dumps(res) except Exception as ex: logger.error(str(ex)) return json.dumps({'success':'false', 'message': str(ex)}) finally: G_ulockmgr.release(user) @app.route("/image/copytarget/", methods=['POST']) @login_required @auth_key_required def copytarget_image(user, beans, form): global G_imagemgr global G_ulockmgr imagename = form.get('imagename',None) description = form.get('description',None) try: G_ulockmgr.acquire(user) res = G_imagemgr.updateinfo(user,imagename,description) return json.dumps({'success':'true', 'action':'copy image to target.'}) except Exception as ex: logger.error(str(ex)) return json.dumps({'success':'false', 'message':str(ex)}) finally: G_ulockmgr.release(user) @app.route("/cloud/setting/get/", methods=['POST']) @login_required def query_account_cloud(cur_user, user, form): global G_cloudmgr logger.info("handle request: cloud/setting/get/") result = G_cloudmgr.getSettingFile() return json.dumps(result) @app.route("/cloud/setting/modify/", methods=['POST']) @login_required def modify_account_cloud(cur_user, user, form): global G_cloudmgr logger.info("handle request: cloud/setting/modify/") result = G_cloudmgr.modifySettingFile(form.get('setting',None)) return json.dumps(result) @app.route("/cloud/node/add/", methods=['POST']) @login_required def add_node_cloud(user, beans, form): global G_cloudmgr logger.info("handle request: cloud/node/add/") G_cloudmgr.engine.addNodeAsync() result = {'success':'true'} return json.dumps(result) @app.route("/addproxy/", methods=['POST']) @login_required def addproxy(user, beans, form): global G_vclustermgr logger.info ("handle request : add proxy") proxy_ip = form.get("ip", None) proxy_port = 
form.get("port", None) clustername = form.get("clustername", None) [status, message] = G_vclustermgr.addproxy(user,clustername,proxy_ip,proxy_port) if status is True: return json.dumps({'success':'true', 'action':'addproxy'}) else: return json.dumps({'success':'false', 'message': message}) @app.route("/deleteproxy/", methods=['POST']) @login_required def deleteproxy(user, beans, form): global G_vclustermgr logger.info ("handle request : delete proxy") clustername = form.get("clustername", None) G_vclustermgr.deleteproxy(user,clustername) return json.dumps({'success':'true', 'action':'deleteproxy'}) @app.route("/port_mapping/add/", methods=['POST']) @login_required def add_port_mapping(user, beans, form): global G_vclustermgr global G_ulockmgr logger.info ("handle request : add port mapping") node_name = form.get("node_name",None) node_ip = form.get("node_ip", None) node_port = form.get("node_port", None) clustername = form.get("clustername", None) if node_name is None or node_ip is None or node_port is None or clustername is None: return json.dumps({'success':'false', 'message': 'Illegal form.'}) user_info = post_to_user("/user/selfQuery/", data = {"token": form.get("token")}) G_ulockmgr.acquire(user) try: [status, message] = G_vclustermgr.add_port_mapping(user,clustername,node_name,node_ip,node_port,user_info['data']['groupinfo']) if status is True: return json.dumps({'success':'true', 'action':'addproxy'}) else: return json.dumps({'success':'false', 'message': message}) except Exception as ex: logger.error(str(ex)) return json.dumps({'success':'false', 'message':str(ex)}) finally: G_ulockmgr.release(user) @app.route("/port_mapping/delete/", methods=['POST']) @login_required def delete_port_mapping(user, beans, form): global G_vclustermgr global G_ulockmgr logger.info ("handle request : delete port mapping") node_name = form.get("node_name",None) clustername = form.get("clustername", None) node_port = form.get("node_port", None) if node_name is None or clustername 
is None: return json.dumps({'success':'false', 'message': 'Illegal form.'}) G_ulockmgr.acquire(user) try: [status, message] = G_vclustermgr.delete_port_mapping(user,clustername,node_name,node_port) if status is True: return json.dumps({'success':'true', 'action':'addproxy'}) else: return json.dumps({'success':'false', 'message': message}) except Exception as ex: logger.error(str(ex)) return json.dumps({'success':'false', 'message':str(ex)}) finally: G_ulockmgr.release(user) @app.route("/monitor/hosts///", methods=['POST']) @login_required def hosts_monitor(user, beans, form, com_id, issue): global G_clustername logger.info("handle request: monitor/hosts") res = {} fetcher = monitor.Fetcher(com_id) if issue == 'meminfo': res['meminfo'] = fetcher.get_meminfo() elif issue == 'gpuinfo': res['gpuinfo'] = fetcher.get_gpuinfo() elif issue == 'cpuinfo': res['cpuinfo'] = fetcher.get_cpuinfo() elif issue == 'cpuconfig': res['cpuconfig'] = fetcher.get_cpuconfig() elif issue == 'diskinfo': res['diskinfo'] = fetcher.get_diskinfo() elif issue == 'osinfo': res['osinfo'] = fetcher.get_osinfo() #elif issue == 'concpuinfo': # res['concpuinfo'] = fetcher.get_concpuinfo() elif issue == 'containers': res['containers'] = fetcher.get_containers() elif issue == 'status': res['status'] = fetcher.get_status() elif issue == 'containerslist': res['containerslist'] = fetcher.get_containerslist() elif issue == 'containersinfo': res = [] conlist = fetcher.get_containerslist() for container in conlist: ans = {} confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername) ans = confetcher.get_basic_info(container) ans['cpu_use'] = confetcher.get_cpu_use(container) ans['mem_use'] = confetcher.get_mem_use(container) res.append(ans) else: return json.dumps({'success':'false', 'message':'not supported request'}) return json.dumps({'success':'true', 'monitor':res}) @app.route("/monitor/vnodes///", methods=['POST']) @login_required def vnodes_monitor(user, beans, form, con_id, issue): global 
G_clustername global G_historymgr logger.info("handle request: monitor/vnodes") res = {} fetcher = monitor.Container_Fetcher(con_id) if issue == 'info': res = fetcher.get_info() elif issue == 'cpu_use': res['cpu_use'] = fetcher.get_cpu_use() elif issue == 'mem_use': res['mem_use'] = fetcher.get_mem_use() elif issue == 'disk_use': res['disk_use'] = fetcher.get_disk_use() elif issue == 'basic_info': res['basic_info'] = fetcher.get_basic_info() elif issue == 'net_stats': res['net_stats'] = fetcher.get_net_stats() elif issue == 'history': res['history'] = G_historymgr.getHistory(con_id) elif issue == 'owner': names = con_id.split('-') result = post_to_user("/user/query/", data = {"token": form.get(token)}) if result['success'] == 'false': res['username'] = "" res['truename'] = "" else: res['username'] = result['data']['username'] res['truename'] = result['data']['truename'] else: res = "Unspported Method!" return json.dumps({'success':'true', 'monitor':res}) @app.route("/monitor/user//", methods=['POST']) @login_required def user_quotainfo_monitor(user, beans, form, issue): global G_historymgr if issue == 'quotainfo': logger.info("handle request: monitor/user/quotainfo/") user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")}) quotainfo = user_info['data']['groupinfo'] return json.dumps({'success':'true', 'quotainfo':quotainfo}) elif issue == 'createdvnodes': logger.info("handle request: monitor/user/createdvnodes/") res = G_historymgr.getCreatedVNodes(user) return json.dumps({'success':'true', 'createdvnodes':res}) elif issue == 'net_stats': logger.info("handle request: monitor/user/net_stats/") res = monitor.Container_Fetcher.get_user_net_stats(user) return json.dumps({'success':'true', 'net_stats':res}) else: return json.dumps({'success':'false', 'message':"Unspported Method!"}) @app.route("/monitor/listphynodes/", methods=['POST']) @login_required def listphynodes_monitor(user, beans, form): global G_nodemgr logger.info("handle request: 
monitor/listphynodes/") res = {} res['allnodes'] = G_nodemgr.get_nodeips() return json.dumps({'success':'true', 'monitor':res}) @app.route("/monitor/pending_gpu_tasks/", methods=['POST']) @login_required def pending_gpu_tasks_monitor(user, beans, form): global G_taskmgr logger.info("handle request: monitor/pending_gpu_tasks/") res = {} res['pending_tasks'] = G_taskmgr.get_pending_gpu_tasks_info() return json.dumps({'success':'true', 'monitor':res}) @app.route("/billing/beans/", methods=['POST']) @auth_key_required def billing_beans(): form = request.form res = post_to_user("/billing/beans/",data=form) logger.info(res) return json.dumps(res) @app.route("/system/parmList/", methods=['POST']) @login_required def parmList_system(user, beans, form): global G_sysmgr logger.info("handle request: system/parmList/") result = G_sysmgr.getParmList() return json.dumps(result) @app.route("/system/modify/", methods=['POST']) @login_required def modify_system(user, beans, form): global G_sysmgr logger.info("handle request: system/modify/") field = form.get("field", None) parm = form.get("parm", None) val = form.get("val", None) [status, message] = G_sysmgr.modify(field,parm,val) if status is True: return json.dumps({'success':'true', 'action':'modify_system'}) else: return json.dumps({'success':'false', 'message': message}) return json.dumps(result) @app.route("/system/clear_history/", methods=['POST']) @login_required def clear_system(user, beans, form): global G_sysmgr logger.info("handle request: system/clear_history/") field = form.get("field", None) parm = form.get("parm", None) [status, message] = G_sysmgr.clear(field,parm) if status is True: return json.dumps({'success':'true', 'action':'clear_history'}) else: return json.dumps({'success':'false', 'message': message}) return json.dumps(result) @app.route("/system/add/", methods=['POST']) @login_required def add_system(user, beans, form): global G_sysmgr logger.info("handle request: system/add/") field = form.get("field", 
None) parm = form.get("parm", None) val = form.get("val", None) [status, message] = G_sysmgr.add(field, parm, val) if status is True: return json.dumps({'success':'true', 'action':'add_parameter'}) else: return json.dumps({'success':'false', 'message': message}) return json.dumps(result) @app.route("/system/delete/", methods=['POST']) @login_required def delete_system(user, beans, form): global G_sysmgr logger.info("handle request: system/delete/") field = form.get("field", None) parm = form.get("parm", None) [status, message] = G_sysmgr.delete(field,parm) if status is True: return json.dumps({'success':'true', 'action':'delete_parameter'}) else: return json.dumps({'success':'false', 'message': message}) return json.dumps(result) @app.route("/system/reset_all/", methods=['POST']) @login_required def resetall_system(user, beans, form): global G_sysmgr logger.info("handle request: system/reset_all/") field = form.get("field", None) [status, message] = G_sysmgr.reset_all(field) if status is True: return json.dumps({'success':'true', 'action':'reset_all'}) else: return json.dumps({'success':'false', 'message': message}) return json.dumps(result) @app.route("/batch/job/add/", methods=['POST']) @login_required @beans_check def add_job(user,beans,form): global G_jobmgr job_data = form.to_dict() job_info = { 'tasks': {} } message = { 'success': 'true', 'message': 'add batch job success' } for key in job_data: if key == 'csrf_token': continue key_arr = key.split('_') value = job_data[key] if key_arr[0] == 'srcAddr' and value == '': #task_idx = 'task_' + key_arr[1] if task_idx in job_info['tasks']: job_info['tasks'][task_idx]['srcAddr'] = '/root' else: job_info['tasks'][task_idx] = { 'srcAddr': '/root' } elif key_arr[0] != 'dependency'and value == '': message['success'] = 'false' message['message'] = 'value of %s is null' % key elif len(key_arr) == 1: job_info[key_arr[0]] = value elif len(key_arr) == 2: key_prefix, task_idx = key_arr[0], key_arr[1] #task_idx = 'task_' + 
task_idx if task_idx in job_info["tasks"]: job_info["tasks"][task_idx][key_prefix] = value else: tmp_dict = { key_prefix: value } job_info["tasks"][task_idx] = tmp_dict elif len(key_arr) == 3: key_prefix, task_idx, mapping_idx = key_arr[0], key_arr[1], key_arr[2] #task_idx = 'task_' + task_idx mapping_idx = 'mapping_' + mapping_idx if task_idx in job_info["tasks"]: if "mapping" in job_info["tasks"][task_idx]: if mapping_idx in job_info["tasks"][task_idx]["mapping"]: job_info["tasks"][task_idx]["mapping"][mapping_idx][key_prefix] = value else: tmp_dict = { key_prefix: value } job_info["tasks"][task_idx]["mapping"][mapping_idx] = tmp_dict else: job_info["tasks"][task_idx]["mapping"] = { mapping_idx: { key_prefix: value } } else: tmp_dict = { "mapping":{ mapping_idx: { key_prefix: value } } } job_info["tasks"][task_idx] = tmp_dict logger.debug('batch job adding info %s' % json.dumps(job_info, indent=4)) [status, msg] = G_jobmgr.add_job(user, job_info) if status: return json.dumps(message) else: logger.debug('fail to add batch job: %s' % msg) message["success"] = "false" message["message"] = msg return json.dumps(message) return json.dumps(message) @app.route("/batch/job/list/", methods=['POST']) @login_required def list_job(user,beans,form): global G_jobmgr result = { 'success': 'true', 'data': G_jobmgr.list_jobs(user) } return json.dumps(result) @app.route("/batch/job/listall/", methods=['POST']) @login_required def list_all_job(user,beans,form): global G_jobmgr result = { 'success': 'true', 'data': G_jobmgr.list_all_jobs() } return json.dumps(result) @app.route("/batch/job/info/", methods=['POST']) @login_required def info_job(user,beans,form): global G_jobmgr jobid = form.get("jobid","") [success, data] = G_jobmgr.get_job(user, jobid) if success: return json.dumps({'success':'true', 'data':data}) else: return json.dumps({'success':'false', 'message': data}) @app.route("/batch/job/stop/", methods=['POST']) @login_required def stop_job(user,beans,form): global 
G_jobmgr jobid = form.get("jobid","") [success,msg] = G_jobmgr.stop_job(user,jobid) if success: return json.dumps({'success':'true', 'action':'stop job'}) else: return json.dumps({'success':'false', 'message': msg}) @app.route("/batch/job/output/", methods=['POST']) @login_required def get_output(user,beans,form): global G_jobmgr jobid = form.get("jobid","") taskid = form.get("taskid","") vnodeid = form.get("vnodeid","") issue = form.get("issue","") result = { 'success': 'true', 'data': G_jobmgr.get_output(user,jobid,taskid,vnodeid,issue) } return json.dumps(result) @app.route("/batch/task/info/", methods=['POST']) @login_required def info_task(user,beans,form): pass @app.route("/batch/vnodes/list/", methods=['POST']) @login_required def batch_vnodes_list(user,beans,form): global G_taskmgr result = { 'success': 'true', 'data': G_taskmgr.get_user_batch_containers(user) } return json.dumps(result) # @app.route("/inside/cluster/scaleout/", methods=['POST']) # @inside_ip_required # def inside_cluster_scalout(cur_user, cluster_info, form): # global G_usermgr # global G_vclustermgr # clustername = cluster_info['name'] # logger.info("handle request : scale out %s" % clustername) # image = {} # image['name'] = form.get("imagename", None) # image['type'] = form.get("imagetype", None) # image['owner'] = form.get("imageowner", None) # user_info = G_usermgr.selfQuery(cur_user = cur_user) # user = user_info['data']['username'] # user_info = json.dumps(user_info) # setting = { # 'cpu': form.get('cpuSetting'), # 'memory': form.get('memorySetting'), # 'disk': form.get('diskSetting') # } # [status, result] = G_usermgr.usageInc(cur_user = cur_user, modification = setting) # if not status: # return json.dumps({'success':'false', 'action':'scale out', 'message': result}) # [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info, setting) # if status: # return json.dumps({'success':'true', 'action':'scale out', 'message':result}) # else: # 
G_usermgr.usageRecover(cur_user = cur_user, modification = setting) # return json.dumps({'success':'false', 'action':'scale out', 'message':result}) @app.errorhandler(500) def internal_server_error(error): logger.debug("An internel server error occured") logger.error(traceback.format_exc()) return json.dumps({'success':'false', 'message':'500 Internal Server Error', 'Unauthorized': 'True'}) if __name__ == '__main__': logger.info('Start Flask...:') try: secret_key_file = open(env.getenv('FS_PREFIX') + '/local/httprest_secret_key.txt') app.secret_key = secret_key_file.read() secret_key_file.close() except: from base64 import b64encode from os import urandom secret_key = urandom(24) secret_key = b64encode(secret_key).decode('utf-8') app.secret_key = secret_key secret_key_file = open(env.getenv('FS_PREFIX') + '/local/httprest_secret_key.txt', 'w') secret_key_file.write(secret_key) secret_key_file.close() os.environ['APP_KEY'] = app.secret_key runcmd = sys.argv[0] app.runpath = runcmd.rsplit('/', 1)[0] global G_nodemgr global G_vclustermgr global G_notificationmgr global etcdclient global G_networkmgr global G_clustername global G_sysmgr global G_historymgr global G_applicationmgr global G_ulockmgr global G_cloudmgr global G_jobmgr global G_taskmgr # move 'tools.loadenv' to the beginning of this file fs_path = env.getenv("FS_PREFIX") logger.info("using FS_PREFIX %s" % fs_path) etcdaddr = env.getenv("ETCD") logger.info("using ETCD %s" % etcdaddr) G_clustername = env.getenv("CLUSTER_NAME") logger.info("using CLUSTER_NAME %s" % G_clustername) # get network interface net_dev = env.getenv("NETWORK_DEVICE") logger.info("using NETWORK_DEVICE %s" % net_dev) ipaddr = network.getip(net_dev) if ipaddr==False: logger.error("network device is not correct") sys.exit(1) else: logger.info("using ipaddr %s" % ipaddr) # init etcdlib client try: etcdclient = etcdlib.Client(etcdaddr, prefix = G_clustername) except Exception: logger.error ("connect etcd failed, maybe etcd address not 
correct...") sys.exit(1) mode = 'recovery' if len(sys.argv) > 1 and sys.argv[1] == "new": mode = 'new' # get public IP and set public Ip in etcd public_IP = env.getenv("PUBLIC_IP") etcdclient.setkey("machines/publicIP/"+ipaddr, public_IP) # do some initialization for mode: new/recovery if mode == 'new': # clean and initialize the etcd table if etcdclient.isdir(""): etcdclient.clean() else: etcdclient.createdir("") # token is saved at fs_path/golbal/token token = tools.gen_token() tokenfile = open(fs_path+"/global/token", 'w') tokenfile.write(token) tokenfile.write("\n") tokenfile.close() etcdclient.setkey("token", token) etcdclient.setkey("service/master", ipaddr) etcdclient.setkey("service/mode", mode) etcdclient.createdir("machines/allnodes") etcdclient.createdir("machines/runnodes") etcdclient.setkey("vcluster/nextid", "1") # clean all users vclusters files : FS_PREFIX/global/users//clusters/ usersdir = fs_path+"/global/users/" for user in os.listdir(usersdir): shutil.rmtree(usersdir+user+"/clusters") shutil.rmtree(usersdir+user+"/hosts") os.mkdir(usersdir+user+"/clusters") os.mkdir(usersdir+user+"/hosts") else: # check whether cluster exists if not etcdclient.isdir("")[0]: logger.error ("cluster not exists, you should use mode:new ") sys.exit(1) # initialize the etcd table for recovery token = tools.gen_token() tokenfile = open(fs_path+"/global/token", 'w') tokenfile.write(token) tokenfile.write("\n") tokenfile.close() etcdclient.setkey("token", token) etcdclient.setkey("service/master", ipaddr) etcdclient.setkey("service/mode", mode) if etcdclient.isdir("_lock")[0]: etcdclient.deldir("_lock") #init portcontrol portcontrol.init_new() G_ulockmgr = lockmgr.LockMgr() clusternet = env.getenv("CLUSTER_NET") logger.info("using CLUSTER_NET %s" % clusternet) G_sysmgr = sysmgr.SystemManager() G_networkmgr = network.NetworkMgr(clusternet, etcdclient, mode, ipaddr) G_networkmgr.printpools() G_cloudmgr = cloudmgr.CloudMgr() # start NodeMgr and NodeMgr will wait for all 
nodes to start ... G_nodemgr = nodemgr.NodeMgr(G_networkmgr, etcdclient, addr = ipaddr, mode=mode) logger.info("nodemgr started") distributedgw = env.getenv("DISTRIBUTED_GATEWAY") G_vclustermgr = vclustermgr.VclusterMgr(G_nodemgr, G_networkmgr, etcdclient, ipaddr, mode, distributedgw) logger.info("vclustermgr started") G_imagemgr = imagemgr.ImageMgr() logger.info("imagemgr started") G_releasemgr = releasemgr.ReleaseMgr(G_vclustermgr,G_ulockmgr) G_releasemgr.start() logger.info("releasemgr started") logger.info("startting to listen on: ") masterip = env.getenv('MASTER_IP') logger.info("using MASTER_IP %s", masterip) masterport = env.getenv('MASTER_PORT') logger.info("using MASTER_PORT %d", int(masterport)) G_historymgr = History_Manager() master_collector = monitor.Master_Collector(G_nodemgr,ipaddr+":"+str(masterport)) master_collector.start() logger.info("master_collector started") # server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) logger.info("starting master server") G_taskmgr = taskmgr.TaskMgr(G_nodemgr, monitor.Fetcher, ipaddr) G_jobmgr = jobmgr.JobMgr(G_taskmgr) G_taskmgr.set_jobmgr(G_jobmgr) G_taskmgr.start() app.run(host = masterip, port = masterport, threaded=True) ================================================ FILE: src/master/jobmgr.py ================================================ import time, threading, random, string, os, traceback, requests import master.monitor import subprocess,json from functools import wraps from datetime import datetime from utils.log import initlogging, logger from utils.model import db, Batchjob, Batchtask from utils import env def db_commit(): try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) raise class BatchJob(object): def __init__(self, jobid, user, job_info, old_job_db=None): if old_job_db is None: self.job_db = Batchjob(jobid,user,job_info['jobName'],int(job_info['jobPriority'])) else: self.job_db = old_job_db self.job_db.clear() 
job_info = {} job_info['jobName'] = self.job_db.name job_info['jobPriority'] = self.job_db.priority all_tasks = self.job_db.tasks.all() job_info['tasks'] = {} for t in all_tasks: job_info['tasks'][t.idx] = json.loads(t.config) self.user = user #self.raw_job_info = job_info self.job_id = jobid self.job_name = job_info['jobName'] self.job_priority = int(job_info['jobPriority']) self.lock = threading.Lock() self.tasks = {} self.dependency_out = {} self.tasks_cnt = {'pending':0, 'scheduling':0, 'running':0, 'retrying':0, 'failed':0, 'finished':0, 'stopped':0} #init self.tasks & self.dependency_out & self.tasks_cnt logger.debug("Init BatchJob user:%s job_name:%s create_time:%s" % (self.job_db.username, self.job_db.name, str(self.job_db.create_time))) raw_tasks = job_info["tasks"] self.tasks_cnt['pending'] = len(raw_tasks.keys()) for task_idx in raw_tasks.keys(): task_info = raw_tasks[task_idx] if old_job_db is None: task_db = Batchtask(jobid+"_"+task_idx, task_idx, task_info) self.job_db.tasks.append(task_db) else: task_db = Batchtask.query.get(jobid+"_"+task_idx) task_db.clear() self.tasks[task_idx] = {} self.tasks[task_idx]['id'] = jobid+"_"+task_idx self.tasks[task_idx]['config'] = task_info self.tasks[task_idx]['db'] = task_db self.tasks[task_idx]['status'] = 'pending' self.tasks[task_idx]['dependency'] = [] dependency = task_info['dependency'].strip().replace(' ', '').split(',') if len(dependency) == 1 and dependency[0] == '': continue for d in dependency: if not d in raw_tasks.keys(): raise ValueError('task %s is not defined in the dependency of task %s' % (d, task_idx)) self.tasks[task_idx]['dependency'].append(d) if not d in self.dependency_out.keys(): self.dependency_out[d] = [] self.dependency_out[d].append(task_idx) if old_job_db is None: db.session.add(self.job_db) db_commit() self.log_status() logger.debug("BatchJob(id:%s) dependency_out: %s" % (self.job_db.id, json.dumps(self.dependency_out, indent=3))) def data_lock(f): @wraps(f) def new_f(self, *args, 
**kwargs): self.lock.acquire() try: result = f(self, *args, **kwargs) except Exception as err: self.lock.release() raise err self.lock.release() return result return new_f # return the tasks without dependencies @data_lock def get_tasks_no_dependency(self,update_status=False): logger.debug("Get tasks without dependencies of BatchJob(id:%s)" % self.job_db.id) ret_tasks = [] for task_idx in self.tasks.keys(): if (self.tasks[task_idx]['status'] == 'pending' and len(self.tasks[task_idx]['dependency']) == 0): if update_status: self.tasks_cnt['pending'] -= 1 self.tasks_cnt['scheduling'] += 1 self.tasks[task_idx]['db'] = Batchtask.query.get(self.tasks[task_idx]['id']) self.tasks[task_idx]['db'].status = 'scheduling' self.tasks[task_idx]['status'] = 'scheduling' task_name = self.tasks[task_idx]['db'].id ret_tasks.append([task_name, self.tasks[task_idx]['config'], self.job_priority]) self.log_status() db_commit() return ret_tasks @data_lock def stop_job(self): self.job_db = Batchjob.query.get(self.job_id) self.job_db.status = 'stopping' db_commit() # update status of this job based def _update_job_status(self): allcnt = len(self.tasks.keys()) if self.tasks_cnt['failed'] != 0: self.job_db.status = 'failed' self.job_db.end_time = datetime.now() elif self.tasks_cnt['finished'] == allcnt: self.job_db.status = 'done' self.job_db.end_time = datetime.now() elif self.job_db.status == 'stopping': if self.tasks_cnt['running'] == 0 and self.tasks_cnt['scheduling'] == 0 and self.tasks_cnt['retrying'] == 0: self.job_db.status = 'stopped' self.job_db.end_time = datetime.now() elif self.tasks_cnt['running'] != 0 or self.tasks_cnt['retrying'] != 0: self.job_db.status = 'running' else: self.job_db.status = 'pending' db_commit() # start run a task, update status @data_lock def update_task_running(self, task_idx): logger.debug("Update status of task(idx:%s) of BatchJob(id:%s) running." 
% (task_idx, self.job_id)) old_status = self.tasks[task_idx]['status'] if old_status == 'stopping': logger.info("Task(idx:%s) of BatchJob(id:%s) has been stopped."% (task_idx, self.job_id)) return self.tasks_cnt[old_status] -= 1 self.tasks[task_idx]['status'] = 'running' self.tasks[task_idx]['db'] = Batchtask.query.get(self.tasks[task_idx]['id']) self.tasks[task_idx]['db'].status = 'running' self.tasks[task_idx]['db'].start_time = datetime.now() self.tasks_cnt['running'] += 1 self.job_db = Batchjob.query.get(self.job_id) self._update_job_status() self.log_status() # a task has finished, update dependency and return tasks without dependencies @data_lock def finish_task(self, task_idx, running_time, billing): if task_idx not in self.tasks.keys(): logger.error('Task_idx %s not in job. user:%s job_name:%s job_id:%s'%(task_idx, self.user, self.job_name, self.job_id)) return [] logger.debug("Task(idx:%s) of BatchJob(id:%s) has finished(running_time=%d,billing=%d). Update dependency..." % (task_idx, self.job_id, running_time, billing)) old_status = self.tasks[task_idx]['status'] if old_status == 'stopping': logger.info("Task(idx:%s) of BatchJob(id:%s) has been stopped."% (task_idx, self.job_id)) return self.tasks_cnt[old_status] -= 1 self.tasks[task_idx]['status'] = 'finished' self.tasks[task_idx]['db'] = Batchtask.query.get(self.tasks[task_idx]['id']) self.tasks[task_idx]['db'].status = 'finished' self.tasks[task_idx]['db'].tried_times += 1 self.tasks[task_idx]['db'].running_time = running_time self.tasks[task_idx]['db'].end_time = datetime.now() self.tasks[task_idx]['db'].billing = billing self.tasks[task_idx]['db'].failed_reason = "" self.job_db = Batchjob.query.get(self.job_id) self.job_db.billing += billing self.tasks_cnt['finished'] += 1 if task_idx not in self.dependency_out.keys(): self._update_job_status() self.log_status() return [] ret_tasks = [] for out_idx in self.dependency_out[task_idx]: try: self.tasks[out_idx]['dependency'].remove(task_idx) except 
Exception as err: logger.warning(traceback.format_exc()) continue if (self.tasks[out_idx]['status'] == 'pending' and len(self.tasks[out_idx]['dependency']) == 0): self.tasks_cnt['pending'] -= 1 self.tasks_cnt['scheduling'] += 1 self.tasks[out_idx]['status'] = 'scheduling' self.tasks[out_idx]['db'] = Batchtask.query.get(self.tasks[out_idx]['id']) self.tasks[out_idx]['db'].status = 'scheduling' task_name = self.job_id + '_' + out_idx ret_tasks.append([task_name, self.tasks[out_idx]['config'], self.job_priority]) self._update_job_status() self.log_status() return ret_tasks # update retrying status of task @data_lock def update_task_retrying(self, task_idx, reason, tried_times): logger.debug("Update status of task(idx:%s) of BatchJob(id:%s) retrying. reason:%s tried_times:%d" % (task_idx, self.job_id, reason, int(tried_times))) old_status = self.tasks[task_idx]['status'] if old_status == 'stopping': logger.info("Task(idx:%s) of BatchJob(id:%s) has been stopped."% (task_idx, self.job_id)) return self.tasks_cnt[old_status] -= 1 self.tasks_cnt['retrying'] += 1 self.tasks[task_idx]['db'] = Batchtask.query.get(self.tasks[task_idx]['id']) self.tasks[task_idx]['db'].status = 'retrying' self.tasks[task_idx]['db'].failed_reason = reason self.tasks[task_idx]['db'].tried_times += 1 self.tasks[task_idx]['status'] = 'retrying' self.job_db = Batchjob.query.get(self.job_id) self._update_job_status() self.log_status() # update failed status of task @data_lock def update_task_failed(self, task_idx, reason, tried_times, running_time, billing): logger.debug("Update status of task(idx:%s) of BatchJob(id:%s) failed. 
reason:%s tried_times:%d" % (task_idx, self.job_id, reason, int(tried_times))) old_status = self.tasks[task_idx]['status'] self.tasks_cnt[old_status] -= 1 self.tasks_cnt['failed'] += 1 self.tasks[task_idx]['status'] = 'failed' self.tasks[task_idx]['db'] = Batchtask.query.get(self.tasks[task_idx]['id']) self.tasks[task_idx]['db'].status = 'failed' self.tasks[task_idx]['db'].failed_reason = reason self.tasks[task_idx]['db'].tried_times += 1 self.tasks[task_idx]['db'].end_time = datetime.now() self.tasks[task_idx]['db'].running_time = running_time self.tasks[task_idx]['db'].billing = billing self.job_db = Batchjob.query.get(self.job_id) self.job_db.billing += billing self._update_job_status() self.log_status() @data_lock def update_task_stopped(self, task_idx, running_time, billing): logger.debug("Update status of task(idx:%s) of BatchJob(id:%s) stopped.running_time:%d billing:%d" % (task_idx, self.job_id, int(running_time), billing)) old_status = self.tasks[task_idx]['status'] if old_status == 'failed' or old_status == 'finished' or old_status == 'stopped': logger.info("task(idx:%s) of BatchJob(id:%s) has been done."%(task_idx, self.job_id)) return False self.tasks_cnt[old_status] -= 1 self.tasks_cnt['stopped'] += 1 self.tasks[task_idx]['status'] = 'stopped' self.tasks[task_idx]['db'] = Batchtask.query.get(self.tasks[task_idx]['id']) self.tasks[task_idx]['db'].status = 'stopped' self.tasks[task_idx]['db'].end_time = datetime.now() self.tasks[task_idx]['db'].running_time = running_time self.tasks[task_idx]['db'].billing = billing self.job_db = Batchjob.query.get(self.job_id) self.job_db.billing += billing self._update_job_status() self.log_status() return True # print status for debuging def log_status(self): task_copy = {} for task_idx in self.tasks.keys(): task_copy[task_idx] = {} task_copy[task_idx]['status'] = self.tasks[task_idx]['status'] task_copy[task_idx]['dependency'] = self.tasks[task_idx]['dependency'] logger.debug("BatchJob(id:%s) tasks status: %s" % 
(self.job_id, json.dumps(task_copy, indent=3))) logger.debug("BatchJob(id:%s) tasks_cnt: %s" % (self.job_id, self.tasks_cnt)) logger.debug("BatchJob(id:%s) job_status: %s" %(self.job_id, self.job_db.status)) class JobMgr(): # load job information from etcd # initial a job queue and job schedueler def __init__(self, taskmgr): logger.info("Init jobmgr...") try: Batchjob.query.all() except: db.create_all(bind='__all__') self.job_map = {} self.taskmgr = taskmgr self.fspath = env.getenv('FS_PREFIX') self.lock = threading.Lock() self.userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT')) self.auth_key = env.getenv('AUTH_KEY') self.recover_jobs() def recover_jobs(self): logger.info("Rerun the unfailed and unfinished jobs...") try: rejobs = Batchjob.query.filter(~Batchjob.status.in_(['done','failed','stopped'])) rejobs = rejobs.order_by(Batchjob.create_time).all() for rejob in rejobs: logger.info("Rerun job: "+rejob.id) logger.debug(str(rejob)) job = BatchJob(rejob.id, rejob.username, None, rejob) self.job_map[job.job_id] = job self.process_job(job) except Exception as err: logger.error(traceback.format_exc()) def charge_beans(self,username,billing): logger.debug("Charge user(%s) for %d beans"%(username, billing)) data = {"owner_name":username,"billing":billing, "auth_key":self.auth_key} url = "/billing/beans/" return requests.post(self.userpoint+url,data=data).json() def add_lock(f): @wraps(f) def new_f(self, *args, **kwargs): self.lock.acquire() try: result = f(self, *args, **kwargs) except Exception as err: self.lock.release() raise err self.lock.release() return result return new_f @add_lock def create_job(self, user, job_info): jobid = self.gen_jobid() job = BatchJob(jobid, user, job_info) return job # user: username # job_info: a json string # user submit a new job, add this job to queue and database def add_job(self, user, job_info): try: job = self.create_job(user, job_info) self.job_map[job.job_id] = job self.process_job(job) except 
ValueError as err: logger.error(err) return [False, err.args[0]] except Exception as err: logger.error(traceback.format_exc()) #logger.error(err) return [False, err.args[0]] return [True, "add batch job success"] # user: username # jobid: the id of job def stop_job(self, user, job_id): logger.info("[jobmgr] stop job(id:%s) user(%s)"%(job_id, user)) if job_id not in self.job_map.keys(): return [False,"Job id %s does not exists! Maybe it has been finished."%job_id] try: job = self.job_map[job_id] if job.job_db.status == 'done' or job.job_db.status == 'failed': return [True,""] if job.user != user and user != 'root': raise Exception("Wrong User.") for task_idx in job.tasks.keys(): taskid = job_id + '_' + task_idx self.taskmgr.lazy_stop_task(taskid) job.stop_job() except Exception as err: logger.error(traceback.format_exc()) #logger.error(err) return [False, err.args[0]] return [True,""] # user: username # list a user's all job def list_jobs(self,user): alljobs = Batchjob.query.filter_by(username=user).all() res = [] for job in alljobs: jobdata = json.loads(str(job)) tasks = job.tasks.all() jobdata['tasks'] = [t.idx for t in tasks] tasks_vnodeCount = {} for t in tasks: tasks_vnodeCount[t.idx] = int(json.loads(t.config)['vnodeCount']) jobdata['tasks_vnodeCount'] = tasks_vnodeCount res.append(jobdata) return res # list all users' jobs def list_all_jobs(self): alljobs = Batchjob.query.all() res = [] for job in alljobs: jobdata = json.loads(str(job)) tasks = job.tasks.all() jobdata['tasks'] = [t.idx for t in tasks] tasks_vnodeCount = {} for t in tasks: tasks_vnodeCount[t.idx] = int(json.loads(t.config)['vnodeCount']) jobdata['tasks_vnodeCount'] = tasks_vnodeCount res.append(jobdata) return res # user: username # jobid: the id of job # get the information of a job, including the status, json description and other information def get_job(self, user, job_id): job = Batchjob.query.get(job_id) if job is None: return [False, "Jobid(%s) does not exist."%job_id] if job.username != 
user and user != 'root': return [False, "Wrong User!"] jobdata = json.loads(str(job)) tasks = job.tasks.order_by(Batchtask.idx).all() tasksdata = [json.loads(str(t)) for t in tasks] for i in range(len(tasksdata)): if tasksdata[i]['status'] == 'scheduling': order = self.taskmgr.get_task_order(tasksdata[i]['id']) tasksdata[i]['order'] = order jobdata['tasks'] = tasksdata return [True, jobdata] # check if a job exists def is_job_exist(self, job_id): return Batchjob.query.get(job_id) != None # generate a random job id def gen_jobid(self): datestr = datetime.now().strftime("%y%m%d") job_id = datestr+''.join(random.sample(string.ascii_letters + string.digits, 3)) while self.is_job_exist(job_id): job_id = datestr+''.join(random.sample(string.ascii_letters + string.digits, 3)) return job_id # add tasks into taskmgr's queue def add_task_taskmgr(self, user, tasks): for task_name, task_info, task_priority in tasks: if not task_info: logger.error("task_info does not exist! task_name(%s)" % task_name) return False else: logger.debug("Add task(name:%s) with priority(%s) to taskmgr's queue." % (task_name, task_priority) ) self.taskmgr.add_task(user, task_name, task_info, task_priority) return True # to process a job, add tasks without dependencies of the job into taskmgr def process_job(self, job): tasks = job.get_tasks_no_dependency(True) return self.add_task_taskmgr(job.user, tasks) # report task status from taskmgr when running, failed and finished # task_name: job_id + '_' + task_idx # status: 'running', 'finished', 'retrying', 'failed', 'stopped' # reason: reason for failure or retrying, such as "FAILED", "TIMEOUT", "OUTPUTERROR" # tried_times: how many times the task has been tried. 
    def report(self, user, task_name, status, reason="", tried_times=1, running_time=0, billing=0):
        """Handle a task status report pushed by taskmgr.

        user        -- owner of the job
        task_name   -- "<job_id>_<task_idx>"
        status      -- one of 'running', 'finished', 'retrying', 'failed', 'stopped'
        reason      -- failure/retry reason (only meaningful for 'failed'/'retrying')
        tried_times -- how many times the task has been tried
        running_time, billing -- accounting data for terminal states
        """
        split_task_name = task_name.split('_')
        # task_name must be exactly "<job_id>_<task_idx>"; anything else is dropped
        if len(split_task_name) != 2:
            logger.error("[jobmgr report]Illegal task_name(%s) report from taskmgr" % task_name)
            return
        # charge beans up front for terminal billed states
        # ('stopped' is charged later, only if update_task_stopped accepts the transition)
        if billing > 0 and (status == 'failed' or status == 'finished'):
            self.charge_beans(user, billing)
        job_id, task_idx = split_task_name
        if job_id not in self.job_map.keys():
            logger.error("[jobmgr report]jobid(%s) does not exist. task_name(%s)" % (job_id,task_name))
            # Job is no longer in memory (e.g. already removed as done/failed):
            # fall back to updating the task record directly in the database.
            #update data in db
            taskdb = Batchtask.query.get(task_name)
            # terminal db states are never overwritten
            if (taskdb is None or taskdb.status == 'finished' or taskdb.status == 'failed'
                    or taskdb.status == 'stopped'):
                return
            taskdb.status = status
            if status == 'failed':
                taskdb.failed_reason = reason
            if status == 'failed' or status == 'stopped' or status == 'finished':
                taskdb.end_time = datetime.now()
            if billing > 0:
                taskdb.running_time = running_time
                taskdb.billing = billing
            db_commit()
            return
        job = self.job_map[job_id]
        if status == "running":
            #logger.debug(str(job.job_db))
            job.update_task_running(task_idx)
            #logger.debug(str(job.job_db))
        elif status == "finished":
            #logger.debug(str(job.job_db))
            # finishing a task may unlock dependent tasks; queue them in taskmgr
            next_tasks = job.finish_task(task_idx, running_time, billing)
            ret = self.add_task_taskmgr(user, next_tasks)
            #logger.debug(str(job.job_db))
        elif status == "retrying":
            job.update_task_retrying(task_idx, reason, tried_times)
        elif status == "failed":
            job.update_task_failed(task_idx, reason, tried_times, running_time, billing)
        elif status == "stopped":
            # update_task_stopped returns False if the task already reached a
            # terminal state, in which case nothing is charged
            if job.update_task_stopped(task_idx, running_time, billing) and billing > 0:
                self.charge_beans(user, billing)
        # drop finished jobs from the in-memory map; later reports fall into
        # the db-only path above
        if job.job_db.status == 'done' or job.job_db.status == 'failed' or job.job_db.status == 'stopped':
            del self.job_map[job_id]

    # Get Batch job stdout or stderr from its file
    def get_output(self, username, jobid, taskid, vnodeid, issue):
        """Return the last 100 lines of a task's stdout/stderr file, or "" on error.

        issue -- "stdout" or "stderr" presumably; it is only used to build the
                 file name, so any suffix the worker wrote is accepted.
        NOTE(review): the shell command is built by string concatenation with
        shell=True; callers must guarantee the path components are sanitized.
        """
        filename = jobid + "_" + taskid + "_" + vnodeid + "_" + issue + ".txt"
        fpath = "%s/global/users/%s/data/batch_%s/%s" % (self.fspath,username,jobid,filename)
        logger.info("Get output from:%s" % fpath)
        try:
            ret = subprocess.run('tail -n 100 ' + fpath,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True)
            # tail failing (e.g. missing file) is reported via non-zero exit code
            if ret.returncode != 0:
                raise IOError(ret.stdout.decode(encoding="utf-8"))
        except Exception as err:
            logger.error(traceback.format_exc())
            return ""
        else:
            return ret.stdout.decode(encoding="utf-8")


================================================
FILE: src/master/lockmgr.py
================================================
#!/usr/bin/python3
'''
This module is the manager of threadings locks.
A LockMgr manages multiple threadings locks.
'''
import threading

class LockMgr:
    """Registry of named threading.Lock objects, created lazily on first acquire."""

    def __init__(self):
        # self.locks will store multiple locks by their names.
        self.locks = {}
        # the lock of self.locks, is to ensure that only one thread can update it at the same time
        self.locks_lock = threading.Lock()

    # acquire a lock by its name
    def acquire(self, lock_name):
        # creation of a missing lock is guarded by locks_lock; the named lock
        # itself is acquired outside the guard so unrelated names don't block
        self.locks_lock.acquire()
        if lock_name not in self.locks.keys():
            self.locks[lock_name] = threading.Lock()
        self.locks_lock.release()
        self.locks[lock_name].acquire()
        return

    # release a lock by its name
    def release(self, lock_name):
        # unknown names are ignored; releasing a lock the caller does not hold
        # would raise RuntimeError from threading.Lock
        if lock_name not in self.locks.keys():
            return
        self.locks[lock_name].release()
        return


================================================
FILE: src/master/monitor.py
================================================
import threading, time, traceback
from utils import env
from utils.log import logger
from httplib2 import Http
from urllib.parse import urlencode

# major dict to store the monitoring data
# only use on Master
# monitor_hosts: use workers' ip addresses as first key.
# second key: cpuinfo,diskinfo,meminfo,osinfo,cpuconfig,running,containers,containerslist
# 1.cpuinfo stores the cpu usages data, and it has keys: user,system,idle,iowait
# 2.diskinfo stores the disks usages data, and it has keys: device,mountpoint,total,used,free,percent
# 3.meminfo stores the memory usages data, and it has keys: total,used,free,buffers,cached,percent
# 4.osinfo stores the information of operating system,
# and it has keys: platform,system,node,release,version,machine,processor
# 5.cpuconfig stores the information of processors, and it is a list, each element of list is a dict
# which stores the information of a processor, each element has key: processor,model name,
# core id, cpu MHz, cache size, physical id.
# 6.running indicates the status of worker,and it has two values: True, False.
# 7.containers store the amount of containers on the worker.
# 8.containerslist stores a list which consists of the names of containers on the worker.
monitor_hosts = {}

# monitor_vnodes: use the owners' names of vnodes(containers) as first key.
# use the names of vnodes(containers) as second key.
# third key: cpu_use,mem_use,disk_use,basic_info,quota
# 1.cpu_use has keys: val,unit,hostpercent
# 2.mem_use has keys: val,unit,usedp
# 3.disk_use has keys: device,mountpoint,total,used,free,percent
# 4.basic_info has keys: Name,State,PID,IP,RunningTime,billing,billing_this_hour
# 5.quota has keys: cpu,memory
monitor_vnodes = {}

# get owner name of a container
# assumes container names are formatted "<owner>-..." — TODO confirm against
# the container-naming code in vclustermgr
def get_owner(container_name):
    names = container_name.split('-')
    return names[0]

# the thread to collect data from each worker and store them in monitor_hosts and monitor_vnodes
class Master_Collector(threading.Thread):
    """Polling thread: every 2s fetch stats from each worker over RPC and
    refresh the module-level monitor_hosts / monitor_vnodes dicts."""

    def __init__(self,nodemgr,master_ip):
        threading.Thread.__init__(self)
        # cooperative stop flag, set by stop()
        self.thread_stop = False
        self.nodemgr = nodemgr
        self.master_ip = master_ip
        # per-user count of network "beans" already charged, to charge only deltas
        self.net_lastbillings = {}
        # 1 GB of traffic == 1 bean
        self.bytes_per_beans = 1000000000
        return

    def net_billings(self, username, now_bytes_total):
        """Charge the user for network traffic consumed since the last poll.

        now_bytes_total is cumulative; a drop below the recorded billing level
        (counter reset) restarts billing from zero.
        """
        global monitor_vnodes
        if not username in self.net_lastbillings.keys():
            self.net_lastbillings[username] = 0
        elif int(now_bytes_total/self.bytes_per_beans) < self.net_lastbillings[username]:
            # traffic counter went backwards (worker restart) — reset baseline
            self.net_lastbillings[username] = 0
        diff = int(now_bytes_total/self.bytes_per_beans) - self.net_lastbillings[username]
        if diff > 0:
            # POST the delta to the master's billing endpoint
            auth_key = env.getenv('AUTH_KEY')
            data = {"owner_name":username,"billing":diff, "auth_key":auth_key}
            header = {'Content-Type':'application/x-www-form-urlencoded'}
            http = Http()
            [resp,content] = http.request("http://"+self.master_ip+"/billing/beans/","POST",urlencode(data),headers = header)
            logger.info("response from master:"+content.decode('utf-8'))
            self.net_lastbillings[username] += diff
        monitor_vnodes[username]['net_stats']['net_billings'] = self.net_lastbillings[username]

    def run(self):
        global monitor_hosts
        global monitor_vnodes
        while not self.thread_stop:
            # mark every known worker down; live ones are refreshed below
            for worker in monitor_hosts.keys():
                monitor_hosts[worker]['running'] = False
            workers = self.nodemgr.get_nodeips()
            for worker in workers:
                try:
                    ip = worker
                    workerrpc = self.nodemgr.ip_to_rpc(worker)
                    # fetch data
                    # NOTE(review): eval() on the RPC reply executes whatever the
                    # worker sends — fully trusts the worker; consider json instead
                    info = list(eval(workerrpc.workerFetchInfo(self.master_ip)))
                    #logger.info(info[0])
                    # store data in monitor_hosts and monitor_vnodes
                    # info layout: [0]=host stats, [1]=per-container stats,
                    # [2]=per-user net stats (as produced by workerFetchInfo)
                    monitor_hosts[ip] = info[0]
                    for container in info[1].keys():
                        owner = get_owner(container)
                        if not owner in monitor_vnodes.keys():
                            monitor_vnodes[owner] = {}
                        monitor_vnodes[owner][container] = info[1][container]
                    for user in info[2].keys():
                        if not user in monitor_vnodes.keys():
                            continue
                        else:
                            monitor_vnodes[user]['net_stats'] = info[2][user]
                            self.net_billings(user, info[2][user]['bytes_total'])
                except Exception as err:
                    # one broken worker must not stop collection from the others
                    logger.warning(traceback.format_exc())
                    logger.warning(err)
            time.sleep(2)
            #logger.info(History.query.all())
            #logger.info(VNode.query.all())
        return

    def stop(self):
        # request the polling loop to exit after its current iteration
        self.thread_stop = True
        return

# master use this class to fetch specific data of containers(vnodes)
class Container_Fetcher:
    """Read-only view over monitor_vnodes for one container; every getter
    returns {} if the data is not (yet) present."""

    def __init__(self,container_name):
        self.owner = get_owner(container_name)
        self.con_id = container_name
        return

    def get_info(self):
        # aggregate all per-container metrics into one dict
        res = {}
        res['cpu_use'] = self.get_cpu_use()
        res['mem_use'] = self.get_mem_use()
        res['disk_use'] = self.get_disk_use()
        res['net_stats'] = self.get_net_stats()
        res['basic_info'] = self.get_basic_info()
        return res

    def get_cpu_use(self):
        global monitor_vnodes
        try:
            res = monitor_vnodes[self.owner][self.con_id]['cpu_use']
            # attach the quota so callers can compute usage ratios
            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_mem_use(self):
        global monitor_vnodes
        try:
            res = monitor_vnodes[self.owner][self.con_id]['mem_use']
            res['quota'] = monitor_vnodes[self.owner][self.con_id]['quota']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_disk_use(self):
        global monitor_vnodes
        try:
            res = monitor_vnodes[self.owner][self.con_id]['disk_use']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_net_stats(self):
        global monitor_vnodes
        try:
            res = monitor_vnodes[self.owner][self.con_id]['net_stats']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    # get users' net_stats
    @staticmethod
    def get_user_net_stats(owner):
        global monitor_vnodes
        try:
            res = monitor_vnodes[owner]['net_stats']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_basic_info(self):
        global monitor_vnodes
        try:
            res = monitor_vnodes[self.owner][self.con_id]['basic_info']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

# Master use this class to fetch specific data of physical machines(hosts)
class Fetcher:
    """Read-only view over one worker's entry in monitor_hosts; every getter
    returns {} (or STOPPED) when the field is missing."""

    def __init__(self,host):
        global monitor_hosts
        # snapshot the worker's current info dict; raises KeyError for unknown hosts
        self.info = monitor_hosts[host]
        return

    #def get_clcnt(self):
    #    return DockletMonitor.clcnt

    #def get_nodecnt(self):
    #    return DockletMonitor.nodecnt

    #def get_meminfo(self):
    #    return self.get_meminfo_('172.31.0.1')

    def get_meminfo(self):
        try:
            res = self.info['meminfo']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_gpuinfo(self):
        try:
            res = self.info['gpuinfo']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_cpuinfo(self):
        try:
            res = self.info['cpuinfo']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_cpuconfig(self):
        try:
            res = self.info['cpuconfig']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_diskinfo(self):
        try:
            res = self.info['diskinfo']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_osinfo(self):
        try:
            res = self.info['osinfo']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_concpuinfo(self):
        try:
            res = self.info['concpupercent']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_containers(self):
        try:
            res = self.info['containers']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res

    def get_status(self):
        # 'running' is set True by the collector each poll; missing/False means down
        try:
            isexist = self.info['running']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            isexist = False
        if(isexist):
            return 'RUNNING'
        else:
            return 'STOPPED'

    def get_containerslist(self):
        try:
            res = self.info['containerslist']
        except Exception as err:
            logger.warning(traceback.format_exc())
            logger.warning(err)
            res = {}
        return res


================================================
FILE: src/master/network.py
================================================
#!/usr/bin/python3

import json, sys, netifaces, threading, traceback

from utils.nettools import netcontrol,ovscontrol
from utils.log import logger

# getip : get ip from network interface
# ifname : name of network interface
def getip(ifname):
    if ifname not in netifaces.interfaces():
        return False # No such interface
    else:
        addrinfo = netifaces.ifaddresses(ifname)
        # 2 == netifaces.AF_INET: the interface has an IPv4 address
        if 2 in addrinfo:
            return netifaces.ifaddresses(ifname)[2][0]['addr']
        else:
            return False # network interface is down

# convert dotted-quad IPv4 string to 32-bit integer
def ip_to_int(addr):
    [a, b, c, d] = addr.split('.')
    return (int(a)<<24) + (int(b)<<16) + (int(c)<<8) + int(d)

# convert 32-bit integer back to dotted-quad IPv4 string
def int_to_ip(num):
    return str((num>>24)&255)+"."+str((num>>16)&255)+"."+str((num>>8)&255)+"."+str(num&255)

# fix addr with cidr, for example, 172.16.0.10/24 --> 172.16.0.0/24
def fix_ip(addr, cidr):
    return int_to_ip( ip_to_int(addr) & ( (-1) << (32-int(cidr)) ) )
    #return int_to_ip(ip_to_int(addr) & ( ~( (1<<(32-int(cidr)))-1 ) ) )

# jump to next interval address with cidr
def next_interval(addr, cidr):
    addr = fix_ip(addr, int(cidr))
    return int_to_ip(ip_to_int(addr)+(1<<(32-int(cidr))))

# jump to before interval address with cidr
def before_interval(addr, cidr):
    addr = fix_ip(addr, int(cidr))
    addrint = ip_to_int(addr)-(1<<(32-int(cidr)))
    # addrint maybe negative; return a sentinel that can never match a real IP
    if addrint < 0:
        return "-1.-1.-1.-1"
    else:
        return int_to_ip(addrint)

# IntervalPool : manage network blocks with IP/CIDR
# Data Structure :
# ... ...
# cidr=16 : A1, A2, ...
#           A1 is an IP, means an interval [A1, A1+2^16-1], equals to A1/16
# cidr=17 : B1, B2, ...
# ... ...
# API :
# allocate
# free
class IntervalPool(object):
    """Buddy-style pool of free network blocks, keyed by CIDR prefix length."""

    # cidr : 1,2, ..., 32
    def __init__(self, addr_cidr=None, copy=None):
        if addr_cidr:
            self.pool = {}
            [addr, cidr] = addr_cidr.split('/')
            cidr = int(cidr)
            # fix addr with cidr, for example, 172.16.0.10/24 --> 172.16.0.0/24
            addr = fix_ip(addr, cidr)
            self.info = addr+"/"+str(cidr)
            # init interval pool
            # cidr   : [ addr ]
            # cidr+1 : [ ]
            # ...
            # 32     : [ ]
            self.pool[str(cidr)]=[addr]
            for i in range(cidr+1, 33):
                self.pool[str(i)]=[]
        elif copy:
            # restore from a dict previously produced by __str__/json.dumps
            self.info = copy['info']
            self.pool = copy['pool']
        else:
            logger.error("IntervalPool init failed with no addr_cidr or center")

    def __str__(self):
        return json.dumps({'info':self.info, 'pool':self.pool})

    def printpool(self):
        cidrs = list(self.pool.keys())
        # sort with key=int(cidr)
        cidrs.sort(key=int)
        for i in cidrs:
            print (i + " : " + str(self.pool[i]))

    # allocate an interval with CIDR
    def allocate(self, thiscidr):
        """Allocate one block of size /thiscidr; returns [ok, addr-or-message]."""
        # thiscidr -- cidr for this request
        # upcidr -- up stream which has interval to allocate
        thiscidr=int(thiscidr)
        upcidr = thiscidr
        # find first cidr who can allocate enough ips
        while((str(upcidr) in self.pool) and len(self.pool[str(upcidr)])==0):
            upcidr = upcidr-1
        if str(upcidr) not in self.pool:
            return [False, 'Not Enough to Allocate']
        # get the block/interval to allocate ips
        upinterval = self.pool[str(upcidr)][0]
        self.pool[str(upcidr)].remove(upinterval)
        # split the upinterval and put the rest intervals back to interval pool
        for i in range(int(thiscidr), int(upcidr), -1):
            self.pool[str(i)].append(next_interval(upinterval, i))
            #self.pool[str(i)].sort(key=ip_to_int) # cidr between thiscidr and upcidr are null, no need to sort
        return [True, upinterval]
# check whether the addr/cidr overlaps the self.pool # for example, addr/cidr=172.16.0.48/29 overlaps self.pool['24']=[172.16.0.0] def overlap(self, addr, cidr): cidr=int(cidr) start_cidr=int(self.info.split('/')[1]) # test self.pool[cidr] from first cidr pool to last cidr pool for cur_cidr in range(start_cidr, 33): if not self.pool[str(cur_cidr)]: continue # for every cur_cidr, test every possible element covered by pool[cur_cidr] in range of addr/cidr cur_addr=fix_ip(addr, min(cidr, cur_cidr)) last_addr=next_interval(addr, cidr) while(ip_to_int(cur_addr)=int(pool_cidr) and fix_ip(addr,pool_cidr)==pool_addr: return True else: return False # deallocate an interval with IP/CIDR def free(self, addr, cidr): if not self.inrange(addr, cidr): return [False, '%s/%s not in range of %s' % (addr, str(cidr), self.info)] if self.overlap(addr, cidr): return [False, '%s/%s overlaps the center pool:%s' % (addr, str(cidr), self.__str__())] cidr = int(cidr) # cidr not in pool means CIDR out of pool range if str(cidr) not in self.pool: return [False, 'CIDR not in pool'] addr = fix_ip(addr, cidr) # merge interval and move to up cidr while(True): # cidr-1 not in pool means current CIDR is the top CIDR if str(cidr-1) not in self.pool: break # if addr can satisfy cidr-1, and next_interval also exist, # merge addr with next_interval to up cidr (cidr-1) # if addr not satisfy cidr-1, and before_interval exist, # merge addr with before_interval to up cidr, and interval index is before_interval if addr == fix_ip(addr, cidr-1): if next_interval(addr, cidr) in self.pool[str(cidr)]: self.pool[str(cidr)].remove(next_interval(addr,cidr)) cidr=cidr-1 else: break else: if before_interval(addr, cidr) in self.pool[str(cidr)]: addr = before_interval(addr, cidr) self.pool[str(cidr)].remove(addr) cidr = cidr - 1 else: break self.pool[str(cidr)].append(addr) # sort interval with key=ip_to_int(IP) self.pool[str(cidr)].sort(key=ip_to_int) return [True, "Free success"] # EnumPool : manage network ips with 
# ip or ip list
# Data Structure : [ A, B, C, ... X ] , A is a IP address
class EnumPool(object):
    """A flat pool of individually allocatable IP addresses for one CIDR block.

    The pool is a plain list of dotted-quad strings.  The network address
    (first IP) and the broadcast address (last IP) are never put in the pool.
    """

    def __init__(self, addr_cidr=None, copy=None):
        """Build a pool either from an 'A.B.C.D/N' string or from a dumped copy.

        addr_cidr : "addr/cidr" string; the pool is enumerated from it.
        copy      : dict with 'info' and 'pool' keys, as produced by __str__.
        """
        if addr_cidr:
            self.pool = []
            [addr, cidr] = addr_cidr.split('/')
            cidr = int(cidr)
            addr = fix_ip(addr, cidr)
            self.info = addr + "/" + str(cidr)
            # init enum pool
            # first IP is network id, last IP is network broadcast address
            # first and last IP can not be allocated
            for i in range(1, pow(2, 32 - cidr) - 1):
                self.pool.append(int_to_ip(ip_to_int(addr) + i))
        elif copy:
            self.info = copy['info']
            self.pool = copy['pool']
        else:
            logger.error("EnumPool init failed with no addr_cidr or copy")

    def __str__(self):
        return json.dumps({'info': self.info, 'pool': self.pool})

    def printpool(self):
        print(str(self.pool))

    def acquire(self, num=1):
        """Pop *num* free IPs. Returns [True, [ips]] or [False, reason]."""
        if num > len(self.pool):
            return [False, "No enough IPs: %s" % self.info]
        result = []
        for _ in range(0, num):
            result.append(self.pool.pop())
        return [True, result]

    def acquire_cidr(self, num=1):
        """Like acquire(), but each returned IP is suffixed with "/CIDR"."""
        [status, result] = self.acquire(int(num))
        if not status:
            return [status, result]
        return [True, list(map(lambda x: x + "/" + self.info.split('/')[1], result))]

    def inrange(self, ip):
        """True if *ip* lies strictly inside this block (network/broadcast excluded)."""
        addr = self.info.split('/')[0]
        addrint = ip_to_int(addr)
        cidr = int(self.info.split('/')[1])
        if addrint + 1 <= ip_to_int(ip) <= addrint + pow(2, 32 - cidr) - 2:
            return True
        return False

    def release(self, ip_or_ips):
        """Return one IP (str) or several IPs (list) to the pool.

        Every IP must be inside this block and not already free, otherwise
        nothing is released and [False, reason] is returned.
        """
        # fix: isinstance instead of type(...) == str
        if isinstance(ip_or_ips, str):
            ips = [ip_or_ips]
        else:
            ips = ip_or_ips
        # check whether all IPs are not in the pool but in the range of pool
        for ip in ips:
            ip = ip.split('/')[0]
            if (ip in self.pool) or (not self.inrange(ip)):
                return [False, 'release IPs failed for ip already existing or ip exceeding the network pool, ips to be released: %s, ip pool is: %s and content is : %s' % (ips, self.info, self.pool)]
        for ip in ips:
            # maybe ip is in format IP/CIDR
            ip = ip.split('/')[0]
            self.pool.append(ip)
        return [True, "release success"]


# wrap EnumPool with vlanid and gateway
class UserPool(EnumPool):
    """EnumPool specialised for one user: the lowest usable IP is reserved as gateway."""

    def __init__(self, addr_cidr=None, copy=None):
        if addr_cidr:
            EnumPool.__init__(self, addr_cidr=addr_cidr)
            self.pool.sort(key=ip_to_int)
            # reserve the first usable address as the user's gateway
            self.gateway = self.pool[0]
            self.pool.remove(self.gateway)
        elif copy:
            EnumPool.__init__(self, copy=copy)
            self.gateway = copy['gateway']
        else:
            logger.error("UserPool init failed with no addr_cidr or copy")

    def get_gateway(self):
        return self.gateway

    def get_gateway_cidr(self):
        return self.gateway + "/" + self.info.split('/')[1]

    def inrange(self, ip):
        # +2 (not +1): the gateway address is also excluded from the usable range
        addr = self.info.split('/')[0]
        addrint = ip_to_int(addr)
        cidr = int(self.info.split('/')[1])
        if addrint + 2 <= ip_to_int(ip) <= addrint + pow(2, 32 - cidr) - 2:
            return True
        return False

    def printpool(self):
        print("net info:" + self.info + ", gateway:" + self.gateway)
        print(str(self.pool))


# NetworkMgr : mange docklet network ip address
# center : interval pool to allocate and free network block with IP/CIDR
# system : enumeration pool to acquire and release system ip address
# users : set of users' enumeration pools to manage users' ip address
class NetworkMgr(object):
    def __init__(self, addr_cidr, etcdclient, mode, masterip):
        """mode is 'new' (bootstrap pools and dump to etcd) or 'recovery' (load from etcd)."""
        self.etcd = etcdclient
        self.masterip = masterip
        self.user_locks = threading.Lock()
        if mode == 'new':
            logger.info("init network manager with %s" % addr_cidr)
            self.center = IntervalPool(addr_cidr=addr_cidr)
            # allocate a pool for system IPs, use CIDR=27, has 32 IPs
            syscidr = 27
            [status, sysaddr] = self.center.allocate(syscidr)
            if not status:
                logger.error("allocate system ips in __init__ failed")
                sys.exit(1)
            # maybe for system, the last IP address of CIDR is available
            # But, EnumPool drop the last IP address in its pool -- it is not important
            self.system = EnumPool(sysaddr + "/" + str(syscidr))
            self.usrgws = {}
            self.users = {}
            self.dump_center()
            self.dump_system()
        elif mode == 'recovery':
            logger.info("init network manager from etcd")
            self.center = None
            self.system = None
            self.usrgws = {}
            self.users = {}
            self.load_center()
            self.load_system()
        else:
            logger.error("mode: %s not supported" % mode)
        # NOTE(review): the legacy commented-out vlanid / shared-vlanid code that
        # used to live here was dead and has been removed; see VCS history.

    def load_center(self):
        [status, centerdata] = self.etcd.getkey("network/center")
        center = json.loads(centerdata)
        self.center = IntervalPool(copy=center)

    def dump_center(self):
        self.etcd.setkey("network/center", json.dumps({'info': self.center.info, 'pool': self.center.pool}))

    def load_system(self):
        [status, systemdata] = self.etcd.getkey("network/system")
        system = json.loads(systemdata)
        self.system = EnumPool(copy=system)

    def dump_system(self):
        self.etcd.setkey("network/system", json.dumps({'info': self.system.info, 'pool': self.system.pool}))

    def load_user(self, username):
        # users are kept in etcd, not in memory; load on demand into self.users
        [status, userdata] = self.etcd.getkey("network/users/" + username)
        usercopy = json.loads(userdata)
        user = UserPool(copy=usercopy)
        logger.debug("load user into dict")
        self.users[username] = user

    def dump_user(self, username):
        logger.debug("dump user into etcd")
        self.etcd.setkey("network/users/" + username, json.dumps({'info': self.users[username].info, 'gateway': self.users[username].gateway, 'pool': self.users[username].pool}))

    def load_usrgw(self, username):
        [status, data] = self.etcd.getkey("network/usrgws/" + username)
        if status:
            self.usrgws[username] = data

    def dump_usrgw(self, username):
        self.etcd.setkey("network/usrgws/" + username, self.usrgws[username])

    def printpools(self):
        # NOTE(review): the original labels were stripped by extraction; these
        # headers are a best-effort reconstruction -- confirm against VCS.
        print("<Center>")
        self.center.printpool()
        print("<System>")
        self.system.printpool()
        print("<Users>")
        print("    users in users is in etcd, not in memory")

    def has_usrgw(self, username):
        self.load_usrgw(username)
        return username in self.usrgws.keys()

    def setup_usrgw(self, input_rate_limit, output_rate_limit, username, uid, nodemgr, workerip=None):
        """Create the user's gateway bridge, on *workerip* if given, else on the master."""
        if not self.has_user(username):
            return [False, "user doesn't exist."]
        self.load_usrgw(username)
        if username in self.usrgws.keys():
            return [False, "user's gateway has been set up."]
        self.load_user(username)
        usrpools = self.users[username]
        if workerip is not None:
            ip = workerip
            worker = nodemgr.ip_to_rpc(workerip)
            logger.info("setup gateway for %s with %s on %s" % (username, usrpools.get_gateway_cidr(), ip))
            self.usrgws[username] = ip
            self.dump_usrgw(username)
            worker.setup_gw('docklet-br-' + str(uid), username, usrpools.get_gateway_cidr(), input_rate_limit, output_rate_limit)
        else:
            logger.info("setup gateway for %s with %s on master" % (username, usrpools.get_gateway_cidr()))
            self.usrgws[username] = self.masterip
            self.dump_usrgw(username)
            netcontrol.setup_gw('docklet-br-' + str(uid), username, usrpools.get_gateway_cidr(), input_rate_limit, output_rate_limit)
        self.dump_user(username)
        del self.users[username]
        return [True, "set up gateway success"]

    def add_user(self, username, cidr, isshared=False):
        """Allocate a /cidr block from the center pool and persist a UserPool for *username*."""
        logger.info("add user %s with cidr=%s" % (username, str(cidr)))
        self.user_locks.acquire()
        try:
            if self.has_user(username):
                return [False, "user already exists in users set"]
            [status, result] = self.center.allocate(cidr)
            self.dump_center()
            if not status:
                return [False, result]
            self.users[username] = UserPool(addr_cidr=result + "/" + str(cidr))
            self.dump_user(username)
            del self.users[username]
            return [True, 'add user success']
        except Exception as ex:
            logger.error(str(ex))
            return [False, str(ex)]
        finally:
            self.user_locks.release()

    def del_usrgwbr(self, username, uid, nodemgr):
        """Tear down the user's gateway and bridge, wherever it was set up."""
        self.load_usrgw(username)
        if username not in self.usrgws.keys():
            return [False, "user does't have gateway or user doesn't exist."]
        ip = self.usrgws[username]
        logger.info("Delete user %s(%s) gateway on %s" % (username, str(uid), ip))
        if ip == self.masterip:
            netcontrol.del_gw('docklet-br-' + str(uid), username)
            netcontrol.del_bridge('docklet-br-' + str(uid))
        else:
            worker = nodemgr.ip_to_rpc(ip)
            worker.del_gw('docklet-br-' + str(uid), username)
            worker.del_bridge('docklet-br-' + str(uid))
        del self.usrgws[username]
        self.etcd.delkey("network/usrgws/" + username)
        return [True, 'delete user\' gateway success']

    def del_user(self, username):
        """Free the user's network block back to the center pool and drop etcd state."""
        self.user_locks.acquire()
        try:
            if not self.has_user(username):
                return [False, username + " not in users set"]
            self.load_user(username)
            [addr, cidr] = self.users[username].info.split('/')
            logger.info("delete user %s with cidr=%s" % (username, int(cidr)))
            self.center.free(addr, int(cidr))
            self.dump_center()
            self.etcd.deldir("network/users/" + username)
            del self.users[username]
            return [True, 'delete user success']
        except Exception as ex:
            logger.error(traceback.format_exc())
            return [False, str(ex)]
        finally:
            self.user_locks.release()

    def check_usergw(self, input_rate_limit, output_rate_limit, username, uid, nodemgr, distributedgw=False):
        """Verify (and repair) the user's gateway; relocate it to master when not distributed."""
        logger.info("Check %s(%s) user gateway." % (username, str(uid)))
        if not self.has_user(username):
            return [False, "user doesn't exist."]
        self.load_usrgw(username)
        if username not in self.usrgws.keys():
            self.usrgws[username] = self.masterip
            self.dump_usrgw(username)
        ip = self.usrgws[username]
        self.load_user(username)
        if not distributedgw:
            if not ip == self.masterip:
                # gateway is on a worker but should be on master: move it
                self.del_usrgwbr(username, uid, nodemgr)
                self.usrgws[username] = self.masterip
                self.dump_usrgw(username)
            netcontrol.check_gw('docklet-br-' + str(uid), username, uid, self.users[username].get_gateway_cidr(), input_rate_limit, output_rate_limit)
            logger.info("recover gw success")
        else:
            worker = nodemgr.ip_to_rpc(ip)
            nodemgr.call_rpc_function(worker, 'check_gw', ['docklet-br-' + str(uid), username, uid, self.users[username].get_gateway_cidr(), input_rate_limit, output_rate_limit])
        del self.users[username]
        return [True, 'check gw ok']

    def check_usergre(self, username, uid, remote, nodemgr, distributedgw=False):
        """Ensure a GRE tunnel exists from the user's gateway host to *remote*."""
        logger.info("Check %s(%s) gre from gateway host to %s." % (username, str(uid), remote))
        self.load_usrgw(username)
        if username not in self.usrgws.keys():
            return [False, 'user does not exist.']
        ip = self.usrgws[username]
        if not distributedgw:
            if not remote == self.masterip:
                ovscontrol.add_port_gre_withkey('docklet-br-' + str(uid), 'gre-' + str(uid) + '-' + remote, remote, uid)
        else:
            if not remote == ip:
                worker = nodemgr.ip_to_rpc(ip)
                nodemgr.call_rpc_function(worker, 'add_port_gre_withkey', ['docklet-br-' + str(uid), 'gre-' + str(uid) + '-' + remote, remote, uid])
        return [True, 'check gre ok']

    def has_user(self, username):
        [status, _value] = self.etcd.getkey("network/users/" + username)
        return status

    def acquire_userips(self, username, num=1):
        logger.info("acquire user ips of %s" % (username))
        if not self.has_user(username):
            return [False, 'username not exists in users set']
        self.load_user(username)
        result = self.users[username].acquire(num)
        self.dump_user(username)
        del self.users[username]
        return result

    def acquire_userips_cidr(self, username, num=1):
        logger.info("acquire user ips of %s" % (username))
        if not self.has_user(username):
            return [False, 'username not exists in users set']
        self.load_user(username)
        result = self.users[username].acquire_cidr(num)
        self.dump_user(username)
        del self.users[username]
        return result

    # ip_or_ips : one IP address or a list of IPs
    def release_userips(self, username, ip_or_ips):
        logger.info("release user ips of %s with ips: %s" % (username, str(ip_or_ips)))
        if not self.has_user(username):
            return [False, 'username not exists in users set']
        self.load_user(username)
        result = self.users[username].release(ip_or_ips)
        self.dump_user(username)
        del self.users[username]
        return result

    def get_usergw(self, username):
        if not self.has_user(username):
            return [False, 'username not exists in users set']
        self.load_user(username)
        result = self.users[username].get_gateway()
        self.dump_user(username)
        del self.users[username]
        return result

    def get_usergw_cidr(self, username):
        if not self.has_user(username):
            return [False, 'username not exists in users set']
        self.load_user(username)
        result = self.users[username].get_gateway_cidr()
        self.dump_user(username)
        del self.users[username]
        return result

    def acquire_sysips(self, num=1):
        logger.info("acquire system ips")
        result = self.system.acquire(num)
        self.dump_system()
        return result

    def acquire_sysips_cidr(self, num=1):
        logger.info("acquire system ips")
        result = self.system.acquire_cidr(num)
        self.dump_system()
        return result

    def release_sysips(self, ip_or_ips):
        # fix: log message said "acquire" while this releases
        logger.info("release system ips: %s" % str(ip_or_ips))
        result = self.system.release(ip_or_ips)
        self.dump_system()
        return result


# ================================================
# FILE: src/master/nodemgr.py
# ================================================
#!/usr/bin/python3
import threading, random, time, xmlrpc.client, sys
#import network
from utils.nettools import netcontrol,ovscontrol
from utils.log import logger
from utils import env
from queue import Queue

##########################################
# NodeMgr
# Description : manage the physical nodes
# 1. list running nodes now
# 2. update node list when new node joins
# ETCD table :
#   machines/allnodes -- all nodes in docklet, for recovery
#   machines/runnodes -- run nodes of this start up
##############################################
class NodeMgr(object):
    """Track the physical worker nodes of the cluster via etcd.

    Keeps three views: runnodes (base workers), batchnodes (base + batch
    workers), allrunnodes (everything currently reachable), plus allnodes
    (every node ever registered, for recovery).
    """

    def __init__(self, networkmgr, etcdclient, addr, mode):
        self.addr = addr
        logger.info("begin initialize on %s" % self.addr)
        self.networkmgr = networkmgr
        self.etcd = etcdclient
        self.mode = mode
        self.workerport = env.getenv('WORKER_PORT')
        self.tasks = {}
        self.recover_queue = Queue(maxsize=0)
        # delete the existing network
        logger.info("delete the existing network")
        [success, bridges] = ovscontrol.list_bridges()
        if success:
            for bridge in bridges:
                if bridge.startswith("docklet-br"):
                    ovscontrol.del_bridge(bridge)
        else:
            logger.error(bridges)
        # get allnodes
        self.allnodes = self._nodelist_etcd("allnodes")
        self.runnodes = []
        self.batchnodes = []
        self.allrunnodes = []
        [status, runlist] = self.etcd.listdir("machines/runnodes")
        for node in runlist:
            nodeip = node['key'].rsplit('/', 1)[1]
            if node['value'] == 'ok':
                logger.info("running node %s" % nodeip)
                self.runnodes.append(nodeip)
        logger.info("all nodes are: %s" % self.allnodes)
        logger.info("run nodes are: %s" % self.runnodes)
        # start new thread to watch whether a new node joins
        logger.info("start thread to watch new nodes ...")
        self.thread_watchnewnode = threading.Thread(target=self._watchnewnode)
        self.thread_watchnewnode.start()
        # wait (at most 10s) for all known nodes to join
        for _ in range(10):
            allin = True
            for node in self.allnodes:
                if node not in self.runnodes:
                    allin = False
                    break
            if allin:
                logger.info("all nodes necessary joins ...")
                break
            time.sleep(1)
        logger.info("run nodes are: %s" % self.runnodes)

    # get nodes list from etcd table
    def _nodelist_etcd(self, which):
        """Return the node IPs under machines/<which>; [] for unknown tables or errors."""
        if which == "allnodes" or which == "runnodes":
            [status, nodeinfo] = self.etcd.listdir("machines/" + which)
            if status:
                nodelist = []
                for node in nodeinfo:
                    nodelist.append(node["key"].rsplit('/', 1)[1])
                return nodelist
        return []

    # thread target : watch whether a new node joins
    def _watchnewnode(self):
        while True:
            time.sleep(0.1)
            [status, runlist] = self.etcd.listdir("machines/runnodes")
            if not status:
                logger.warning("get runnodes list failed from etcd ")
                continue
            etcd_runip = []
            for node in runlist:
                nodeip = node['key'].rsplit('/', 1)[1]
                if node['value'] == 'waiting':
                    # waiting state can be deleted, there is no use to let master check
                    # this state because worker will change it and master will not change it now.
                    # it is only preserved for compatible.
                    logger.info("%s want to joins, call it to init first" % nodeip)
                elif node['value'] == 'work':
                    logger.info("new node %s joins" % nodeip)
                    etcd_runip.append(nodeip)
                    self.etcd.setkey("machines/runnodes/" + nodeip, "ok")
                    if nodeip not in self.runnodes:
                        self.runnodes.append(nodeip)
                        # node not in all node list is a new node.
                        if nodeip not in self.allnodes:
                            self.allnodes.append(nodeip)
                            self.etcd.setkey("machines/allnodes/" + nodeip, "ok")
                        else:
                            # recover node
                            self.recover_queue.put(nodeip)
                    logger.debug("all nodes are: %s" % self.allnodes)
                    logger.debug("run nodes are: %s" % self.runnodes)
                elif node['value'] == 'ok':
                    etcd_runip.append(nodeip)
            # fix: dropped unused local `new_runnodes`; the loop below only logs
            for nodeip in self.runnodes:
                if nodeip not in etcd_runip:
                    logger.info("Worker %s is stopped, remove %s:%s from rpc client list" % (nodeip, nodeip, self.workerport))
            self.runnodes = etcd_runip
            self.batchnodes = self.runnodes.copy()
            self.allrunnodes = self.runnodes.copy()
            [status, batchlist] = self.etcd.listdir("machines/batchnodes")
            if status:
                for node in batchlist:
                    nodeip = node['key'].rsplit('/', 1)[1]
                    self.batchnodes.append(nodeip)
                    self.allrunnodes.append(nodeip)

    def recover_node(self, ip, tasks):
        """Replay queued RPC tasks against a worker that just came back."""
        logger.info("now recover for worker:%s" % ip)
        worker = self.ip_to_rpc(ip)
        for task in tasks:
            taskname = task['taskname']
            taskargs = task['args']
            logger.info("recover task:%s in worker:%s" % (taskname, ip))
            # fix: getattr instead of eval on a constructed string
            getattr(worker, taskname)(*taskargs)

    # get all run nodes' IP addr
    def get_nodeips(self):
        return self.allrunnodes

    def get_batch_nodeips(self):
        return self.batchnodes

    def get_base_nodeips(self):
        return self.runnodes

    def get_allnodes(self):
        return self.allnodes

    def ip_to_rpc(self, ip):
        """Return an XML-RPC proxy for a known running node, or None if unknown."""
        if ip in self.allrunnodes:
            return xmlrpc.client.ServerProxy("http://%s:%s" % (ip, env.getenv("WORKER_PORT")))
        else:
            return None

    def call_rpc_function(self, worker, function, args):
        """Invoke *function* on an RPC proxy; None proxy logs an error and returns None."""
        if worker is None:
            logger.error("worker is None, fail to call rpc function.")
            return None
        # fix: getattr instead of eval on a constructed string
        return getattr(worker, function)(*args)
# ================================================
# FILE: src/master/notificationmgr.py
# ================================================
import json
from utils.log import logger
from utils.model import db, Notification, NotificationGroups, User, UserNotificationPair
from master.userManager import administration_required, token_required
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from datetime import datetime
from utils import env
from master.settings import settings


class NotificationMgr:
    """CRUD and delivery (web + email) of admin notifications to user groups."""

    def __init__(self):
        logger.info("Notification Manager init...")
        # best-effort schema bootstrap: probe each table, create all on failure
        try:
            Notification.query.all()
        except:
            db.create_all()
        try:
            NotificationGroups.query.all()
        except:
            db.create_all()
        try:
            UserNotificationPair.query.all()
        except:
            db.create_all()
        logger.info("Notification Manager init done!")

    def query_user_notifications(self, user):
        """Notifications targeted at the user's group plus the 'all' group, newest first."""
        group_name = user.user_group
        notifies = NotificationGroups.query.filter_by(group_name=group_name).all()
        notifies.extend(NotificationGroups.query.filter_by(group_name='all').all())
        notify_ids = [notify.notification_id for notify in notifies]
        notify_ids = sorted(list(set(notify_ids)), reverse=True)
        return [Notification.query.filter_by(id=notify_id).first() for notify_id in notify_ids]

    def mail_notification(self, notify_id):
        """Email the notification to every user of its target groups.

        No-op (but still 'success') when EMAIL_FROM_ADDRESS is unset.
        """
        email_from_address = settings.get('EMAIL_FROM_ADDRESS')
        if (email_from_address in ['\'\'', '\"\"', '']):
            return {'success': 'true'}
        notify = Notification.query.filter_by(id=notify_id).first()
        notify_groups = NotificationGroups.query.filter_by(notification_id=notify_id).all()
        to_addr = []
        groups = []
        for group in notify_groups:
            groups.append(group.group_name)
        if 'all' in groups:
            users = User.query.all()
            for user in users:
                to_addr.append(user.e_mail)
        else:
            for group in notify_groups:
                users = User.query.filter_by(user_group=group.group_name).all()
                for user in users:
                    to_addr.append(user.e_mail)
        content = notify.content
        # NOTE(review): the HTML markup below was stripped by the repo
        # extraction; reconstructed so the three % args have three
        # placeholders -- confirm exact markup against VCS history.
        text = '<html><head></head><body>'
        text += 'Dear ' + 'user' + ':<br>'  # user.username + ':'
        text += '''<br>
      Your account in <a href='%s'>%s</a> has received a notification:<br>
<br>
%s
<br>
<br>
      Note: DO NOT reply to this email!<br>
<br>
<br>
Docklet Team, SEI, PKU<br>
''' % (env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"), content)
        text += '<br>' + str(datetime.utcnow()) + '<br>'
        text += '</body></html>'
        subject = 'Docklet Notification: ' + notify.title
        msg = MIMEMultipart()
        textmsg = MIMEText(text, 'html', 'utf-8')
        msg['Subject'] = Header(subject, 'utf-8')
        msg['From'] = email_from_address
        msg.attach(textmsg)
        s = smtplib.SMTP()
        s.connect()
        for address in to_addr:
            try:
                # fix: email headers append on assignment -- without the del,
                # every recipient after the first got duplicate To headers
                del msg['To']
                msg['To'] = address
                s.sendmail(email_from_address, address, msg.as_string())
            except Exception as e:
                logger.error(e)
        s.close()
        return {"success": 'true'}

    @administration_required
    def create_notification(self, *args, **kwargs):
        '''
        Usage: createNotification(cur_user = 'Your current user', form = 'Post form')
        Post form: {title: 'Your title', content: 'Your content', groups: "['groupA', 'groupB']"}
        '''
        form = kwargs['form']
        notify = Notification(form['title'], form['content'])
        group_names = form.getlist('groups')
        db.session.add(notify)
        db.session.commit()
        if 'all' in group_names:
            group_names = ['all']
        for group_name in group_names:
            if group_name == 'none':
                continue
            notify_groups = NotificationGroups(notify.id, group_name)
            db.session.add(notify_groups)
            db.session.commit()
        if 'sendMail' in form:
            self.mail_notification(notify.id)
        users = User.query.all()
        for user in users:
            user_group = user.user_group
            for group_name in group_names:
                if user_group == group_name:
                    tempPair = UserNotificationPair(user.username, notify.id)
                    db.session.add(tempPair)
                    break
        db.session.commit()
        return {"success": 'true'}

    @administration_required
    def list_notifications(self, *args, **kwargs):
        """All non-deleted notifications with their target groups, newest first."""
        notifies = Notification.query.all()
        notify_infos = []
        for notify in notifies:
            if notify is None or notify.status == 'deleted':
                continue
            groups = NotificationGroups.query.filter_by(notification_id=notify.id).all()
            notify_infos.append({
                'id': notify.id,
                'title': notify.title,
                'content': notify.content,
                'create_date': notify.create_date,
                'status': notify.status,
                'groups': [group.group_name for group in groups]
            })
        notify_infos.reverse()
        return {'success': 'true', 'data': notify_infos}

    @administration_required
    def modify_notification(self, *args, **kwargs):
        form = kwargs['form']
        notify_id = form['notify_id']
        notify = Notification.query.filter_by(id=notify_id).first()
        notify.title = form['title']
        notify.content = form['content']
        notify.status = form['status']
        # rebuild the group bindings from scratch
        notifies_groups = NotificationGroups.query.filter_by(notification_id=notify_id).all()
        for notify_groups in notifies_groups:
            db.session.delete(notify_groups)
        group_names = form.getlist('groups')
        if 'all' in group_names:
            group_names = ['all']
        for group_name in group_names:
            if group_name == 'none':
                continue
            notify_groups = NotificationGroups(notify.id, group_name)
            db.session.add(notify_groups)
        db.session.commit()
        if 'sendMail' in form:
            self.mail_notification(notify_id)
        return {"success": 'true'}

    @administration_required
    def delete_notification(self, *args, **kwargs):
        form = kwargs['form']
        notify_id = form['notify_id']
        notify = Notification.query.filter_by(id=notify_id).first()
        notifies_groups = NotificationGroups.query.filter_by(notification_id=notify_id).all()
        for notify_groups in notifies_groups:
            db.session.delete(notify_groups)
        db.session.delete(notify)
        db.session.commit()
        temppairs = UserNotificationPair.query.filter_by(notifyId=notify_id).all()
        for temppair in temppairs:
            db.session.delete(temppair)
        db.session.commit()
        return {"success": 'true'}

    @token_required
    def query_self_notification_simple_infos(self, *args, **kwargs):
        """Per-user notification list (id/title/date/isRead); creates missing read-state pairs."""
        user = kwargs['cur_user']
        username = user.username
        notifies = self.query_user_notifications(user)
        notify_simple_infos = []
        for notify in notifies:
            if notify is None or notify.status != 'open':
                continue
            notifyid = notify.id
            temppair = UserNotificationPair.query.filter_by(userName=username, notifyId=notifyid).first()
            if temppair is None:
                isRead = 0
                temppair = UserNotificationPair(username, notifyid)
                db.session.add(temppair)
                db.session.commit()
            else:
                isRead = temppair.isRead
            notify_simple_infos.append({
                'id': notify.id,
                'title': notify.title,
                'create_date': notify.create_date,
                'isRead': isRead
            })
        return {'success': 'true', 'data': notify_simple_infos}

    @token_required
    def query_self_notifications_infos(self, *args, **kwargs):
        """Full per-user notification list; marks every listed notification read."""
        user = kwargs['cur_user']
        username = user.username
        notifies = self.query_user_notifications(user)
        notify_infos = []
        for notify in notifies:
            if notify is None or notify.status != 'open':
                continue
            notifyid = notify.id
            temppair = UserNotificationPair.query.filter_by(userName=username, notifyId=notifyid).first()
            if temppair is None:
                temppair = UserNotificationPair(username, notifyid)
                db.session.add(temppair)
            isRead = 1
            temppair.isRead = 1
            db.session.add(temppair)
            db.session.commit()
            notify_infos.append({
                'id': notify.id,
                'title': notify.title,
                'content': notify.content,
                'create_date': notify.create_date,
                'isRead': isRead
            })
        return {'success': 'true', 'data': notify_infos}

    @token_required
    def query_notification(self, *args, **kwargs):
        user = kwargs['cur_user']
        form = kwargs['form']
        group_name = user.user_group
        notify_id = form['notify_id']
        groups = NotificationGroups.query.filter_by(notification_id=notify_id).all()
        if not (group_name in [group.group_name for group in groups]):
            if not ('all' in [group.group_name for group in groups]):
                return {'success': 'false', 'reason': 'Unauthorized Action'}
        notify = Notification.query.filter_by(id=notify_id).first()
        notify_info = {
            'id': notify.id,
            'title': notify.title,
            'content': notify.content,
            'create_date': notify.create_date
        }
        usernotifypair = UserNotificationPair.query.filter_by(userName=user.username, notifyId=notify.id).first()
        # fix: pair may not exist yet (user queried directly) -- create it
        # instead of crashing on None.isRead
        if usernotifypair is None:
            usernotifypair = UserNotificationPair(user.username, notify.id)
        usernotifypair.isRead = 1
        db.session.add(usernotifypair)
        db.session.commit()
        if notify.status != 'open':
            notify_info['title'] = 'This notification is not available'
            notify_info['content'] = 'Sorry, it seems that the administrator has closed this notification.'
            return {'success': 'false', 'data': notify_info}
        return {'success': 'true', 'data': notify_info}
# ================================================
# FILE: src/master/parser.py
# ================================================
#!/usr/bin/python3
# fix: shebang typo /user/ -> /usr/
import json

# sample POST payload used for manual testing via __main__
job_data = {'image_1': 'base_base_base', 'mappingRemoteDir_2_2': 'sss', 'dependency_1': 'aaa', 'mappingLocalDir_2_1': 'xxx', 'mappingLocalDir_1_2': 'aaa', 'mappingLocalDir_1_1': 'aaa', 'mappingLocalDir_2_3': 'fdsffdf', 'mappingRemoteDir_1_1': 'ddd', 'mappingRemoteDir_2_3': 'sss', 'srcAddr_1': 'aaa', 'mappingSource_2_1': 'Aliyun', 'cpuSetting_1': '1', 'mappingSource_2_2': 'Aliyun', 'retryCount_2': '1', 'mappingSource_1_1': 'Aliyun', 'expTime_1': '60', 'diskSetting_2': '1024', 'diskSetting_1': '1024', 'dependency_2': 'ddd', 'memorySetting_1': '1024', 'command_2': 'ccc', 'mappingRemoteDir_1_2': 'ddd', 'gpuSetting_2': '0', 'memorySetting_2': '1024', 'gpuSetting_1': '0', 'mappingLocalDir_2_2': 'bbb', 'mappingSource_1_2': 'Aliyun', 'expTime_2': '60', 'mappingRemoteDir_2_1': 'vvv', 'srcAddr_2': 'fff', 'cpuSetting_2': '1', 'instCount_1': '1', 'mappingSource_2_3': 'Aliyun', 'token': 'ZXlKaGJHY2lPaUpJVXpJMU5pSXNJbWxoZENJNk1UVXpNelE0TVRNMU5Td2laWGh3SWpveE5UTXpORGcwT1RVMWZRLmV5SnBaQ0k2TVgwLkF5UnRnaGJHZXhJY2lBSURZTUd5eXZIUVJnUGd1ZTA3OEtGWkVoejJVMkE=', 'instCount_2': '1', 'retryCount_1': '1', 'command_1': 'aaa', 'jobPriority': '0', 'image_2': 'base_base_base', 'jobName': 'aaa'}


def parse(job_data):
    """Fold a flat form payload into a nested job description.

    Keys are 'name', 'name_<task>' or 'name_<task>_<mapping>' and become
    job_info['task_<task>']['mapping']['mapping_<mapping>'][name] etc.
    Prints the result as JSON and (improvement) also returns it so callers
    can use the structure programmatically.
    """
    job_info = {}
    for key in job_data:
        key_arr = key.split('_')
        value = job_data[key]
        if len(key_arr) == 1:
            # job-level attribute
            job_info[key_arr[0]] = value
        elif len(key_arr) == 2:
            # task-level attribute
            key_prefix, task_idx = key_arr[0], key_arr[1]
            task_idx = 'task_' + task_idx
            if task_idx in job_info:
                job_info[task_idx][key_prefix] = value
            else:
                job_info[task_idx] = {key_prefix: value}
        elif len(key_arr) == 3:
            # mapping-level attribute inside a task
            key_prefix, task_idx, mapping_idx = key_arr[0], key_arr[1], key_arr[2]
            task_idx = 'task_' + task_idx
            mapping_idx = 'mapping_' + mapping_idx
            if task_idx in job_info:
                if "mapping" in job_info[task_idx]:
                    if mapping_idx in job_info[task_idx]["mapping"]:
                        job_info[task_idx]["mapping"][mapping_idx][key_prefix] = value
                    else:
                        job_info[task_idx]["mapping"][mapping_idx] = {key_prefix: value}
                else:
                    job_info[task_idx]["mapping"] = {mapping_idx: {key_prefix: value}}
            else:
                job_info[task_idx] = {"mapping": {mapping_idx: {key_prefix: value}}}
    print(json.dumps(job_info, indent=4))
    return job_info


if __name__ == '__main__':
    parse(job_data)


# ================================================
# FILE: src/master/releasemgr.py
# ================================================
import threading, time, requests, json, traceback
from utils import env
from utils.log import logger
from utils.model import db, VCluster, Container
import smtplib, datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from master.settings import settings

userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))


def post_to_user(url='/', data={}):
    """POST *data* to the user-service endpoint and return the decoded JSON."""
    return requests.post(userpoint + url, data=data).json()


_ONE_DAY_IN_SECONDS = 60 * 60 * 24


class ReleaseMgr(threading.Thread):
    """Background thread: warn about, then release, long-stopped vclusters."""

    def __init__(self, vclustermgr, ulockmgr, check_interval=_ONE_DAY_IN_SECONDS):
        threading.Thread.__init__(self)
        self.thread_stop = False
        self.vclustermgr = vclustermgr
        self.ulockmgr = ulockmgr
        self.check_interval = check_interval
        self.warning_days = int(env.getenv("WARNING_DAYS"))
        self.release_days = int(env.getenv("RELEASE_DAYS"))
        # releasing must happen strictly after warning
        if self.release_days <= self.warning_days:
            self.release_days = self.warning_days + 1
        logger.info("[ReleaseMgr] start withe warning_days=%d release_days=%d" % (self.warning_days, self.release_days))

    def _send_email(self, to_address, username, vcluster, days, is_released=True):
        """Send the warning (is_released=False) or released (True) email for *vcluster*."""
        email_from_address = settings.get('EMAIL_FROM_ADDRESS')
        if (email_from_address in ['\'\'', '\"\"', '']):
            return
        # NOTE(review): HTML markup was stripped by the repo extraction;
        # reconstructed so the six % args match six placeholders -- confirm
        # exact markup against VCS history.
        text = '<html><head></head><body>'
        text += 'Dear ' + username + ':<br>'
        st_str = vcluster.stop_time.strftime("%Y-%m-%d %H:%M:%S")
        text += '''<br>
      Your workspace/vcluster(name:%s id:%d) in <a href='%s'>%s</a> has been stopped more than %d days now(stopped at:%s).<br>
''' % (vcluster.clustername, vcluster.clusterid, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"), days, st_str)
        if is_released:
            text += '''
      Therefore, the workspace/vcluster has been released now.<br>
      And the data in it couldn't be recoverd unless you save it.<br>
      You can create new workspace/vcluster if you need.<br>
'''
        else:
            release_date = vcluster.stop_time + datetime.timedelta(days=self.release_days)
            day_d = (release_date - datetime.datetime.now()).days
            rd_str = release_date.strftime("%Y-%m-%d %H:%M:%S")
            text += '''
      It will be released after %s(in about %d days).<br>
      And the data in it couldn't be recoverd after releasing.<br>
      Please start or save it before %s(in about %d days) if you want to keep the data.<br>
''' % (rd_str, day_d, rd_str, day_d)
        text += '''<br>
      Note: DO NOT reply to this email!<br>
<br>
<br>
Docklet Team, SEI, PKU<br>
'''
        subject = 'Docklet workspace/vcluster releasing alert'
        msg = MIMEMultipart()
        textmsg = MIMEText(text, 'html', 'utf-8')
        msg['Subject'] = Header(subject, 'utf-8')
        msg['From'] = email_from_address
        msg['To'] = to_address
        msg.attach(textmsg)
        s = smtplib.SMTP()
        s.connect()
        try:
            s.sendmail(email_from_address, to_address, msg.as_string())
        except Exception as err:
            logger.error(traceback.format_exc())
        s.close()

    def run(self):
        while not self.thread_stop:
            logger.info("[ReleaseMgr] Begin checking each vcluster if it needs to be released...")
            auth_key = env.getenv('AUTH_KEY')
            res = post_to_user("/master/user/groupinfo/", {'auth_key': auth_key})
            groups = json.loads(res['groups'])
            quotas = {}
            for group in groups:
                quotas[group['name']] = group['quotas']
            vcs = VCluster.query.filter_by(status='stopped').all()
            for vc in vcs:
                # re-fetch to get fresh state for each cluster
                vc = VCluster.query.get(vc.clusterid)
                if vc.stop_time is None:
                    continue
                days = (datetime.datetime.now() - vc.stop_time).days
                if days >= self.release_days:
                    logger.info("[ReleaseMgr] VCluster(id:%d,user:%s) has been stopped(%s) for more than %d days, it will be released."
                                % (vc.clusterid, vc.ownername, vc.stop_time.strftime("%Y-%m-%d %H:%M:%S"), self.release_days))
                    rc_info = post_to_user("/master/user/recoverinfo/", {'username': vc.ownername, 'auth_key': auth_key})
                    logger.info("[ReleaseMgr] %s" % str(rc_info))
                    groupname = rc_info['groupname']
                    user_info = {"data": {"id": rc_info['uid'], "group": groupname, "groupinfo": quotas[groupname]}}
                    self.ulockmgr.acquire(vc.ownername)
                    try:
                        [status, usage_info] = self.vclustermgr.get_clustersetting(vc.clustername, vc.ownername, "all", True)
                        success, msg = self.vclustermgr.delete_cluster(vc.clustername, vc.ownername, json.dumps(user_info))
                        if not success:
                            logger.error("[ReleaseMgr] Can't release VCluster(id:%d,user:%s) for %s" % (vc.clusterid, vc.ownername, msg))
                        else:
                            if status:
                                logger.info("[ReleaseMgr] Release Quota.")
                                post_to_user("/master/user/usageRelease/", {'auth_key': auth_key, 'username': vc.ownername, 'cpu': usage_info['cpu'], 'memory': usage_info['memory'], 'disk': usage_info['disk']})
                            self._send_email(rc_info['email'], vc.ownername, vc, self.release_days)
                            logger.info("[ReleaseMgr] Succeed to releasing VCluster(id:%d,user:%s) for %s. Send mail to info." % (vc.clusterid, vc.ownername, msg))
                    except Exception as err:
                        logger.error(traceback.format_exc())
                    finally:
                        self.ulockmgr.release(vc.ownername)
                elif days >= self.warning_days:
                    logger.info("[ReleaseMgr] VCluster(id:%d,user:%s) has been stopped(%s) for more than %d days. A warning email will be sent to the user."
                                % (vc.clusterid, vc.ownername, vc.stop_time.strftime("%Y-%m-%d %H:%M:%S"), self.warning_days))
                    if vc.is_warned:
                        logger.info("[ReleaseMgr] VCluster(id:%d,user:%s) has been warned before. Skip it." % (vc.clusterid, vc.ownername))
                        continue
                    rc_info = post_to_user("/master/user/recoverinfo/", {'username': vc.ownername, 'auth_key': auth_key})
                    logger.info("[ReleaseMgr] %s" % str(rc_info))
                    self._send_email(rc_info['email'], vc.ownername, vc, self.warning_days, False)
                    vc.is_warned = True
                    try:
                        db.session.commit()
                    except Exception as err:
                        db.session.rollback()
                        logger.warning(traceback.format_exc())
            time.sleep(self.check_interval)

    def stop(self):
        self.thread_stop = True
        return


# ================================================
# FILE: src/master/settings.py
# ================================================
#!/usr/bin/python3
from utils import env
import json, os
from functools import wraps
from utils.log import logger

# NOTE(review): class settingsClass continues past the end of this chunk
# (its `update` method is truncated mid-body); it is intentionally not
# reconstructed here to avoid guessing at unseen code.
settingFile.write(settingText) settingFile.close() args[0].setting = newSetting return {'success': 'true'} except: return {'success': 'false'} settings = settingsClass() ================================================ FILE: src/master/sysmgr.py ================================================ import re, string, os editableParms = ["LOG_LEVEL","ADMIN_EMAIL_ADDRESS","EMAIL_FROM_ADDRESS","OPEN_REGISTRY","APPROVAL_RBT"] configPath = {"docklet": os.environ.get("DOCKLET_CONF")+"/docklet.conf", "container": os.environ.get("DOCKLET_CONF")+"/container.conf"} #configPath = {"docklet": "../conf/docklet.conf", # "container": "../conf/container.conf"} defaultPattern = re.compile(u'# *\S+ *= *\S+') activePattern = re.compile(u'\S+ *= *\S+') historyPattern = re.compile(u'## *\S+ *= *\S+') def parse_line(line): kind = "" parm = "" val = "" if defaultPattern.match(line) != None and not "==" in line: kind = "default" elif activePattern.match(line) != None and not "#" in line: kind = "active" elif historyPattern.match(line) != None and not "==" in line: kind = "history" if kind != "": line = line.replace("#", "").replace("\n", "") parm = line[:line.find("=")].strip() val = line[line.find("=")+1:].strip() return [kind, parm, val] class SystemManager(): def getParmList(*args, **kwargs): #result = {"docklet": "", "container": ""} result = {"docklet": "", "container": ""} for field in ["docklet"]: configFile = open(configPath[field]) lines = configFile.readlines() configFile.close() configFile = open(configPath[field]) wholeFile = configFile.read() configFile.close() conf = {} segs = wholeFile.split("\n\n") for line in lines: [linekind, lineparm, lineval] = parse_line(line) if lineparm in editableParms: editable = 1 # edit it in settings.py else: editable = 0 if linekind == "default": conf[lineparm] = {"val": "novalidvaluea", "default": lineval, "history": [], "editable": editable, "details": ""} for line in lines: [linekind, lineparm, lineval] = parse_line(line) if linekind == 
"active": try: conf[lineparm]["val"] = lineval except: if lineparm in editableParms: editable = 1 else: editable = 0 conf[lineparm] = {"val": lineval, "default": lineval, "history": [], "editable": editable, "details": ""} for line in lines: [linekind, lineparm, lineval] = parse_line(line) if linekind == "history": conf[lineparm]["history"].append(lineval) for parm in conf.keys(): for seg in segs: if parm in seg: conf[parm]["details"] = seg result[field] = [({'parm': parm, 'val': conf[parm]['val'], 'default': conf[parm]['default'], "history": conf[parm]['history'], "editable": conf[parm]['editable'], "details": conf[parm]['details']}) for parm in sorted(conf.keys())] configFile = open(configPath["container"]) wholeFile = configFile.read() configFile.close() result["container"] = wholeFile return result # 1. def and not act 2. act and not def 3. def and act # have def and act and hist def modify(self, field, parm, val): configFile = open(configPath[field]) lines = configFile.readlines() configFile.close() finish = False for i in range(0, len(lines)): line = lines[i] [linekind, lineparm, lineval] = parse_line(line) if linekind == "active" and lineparm == parm: lines[i] = "## " + parm + "=" + lineval + "\n" lines.insert(i, parm + "=" + val + "\n") if i == 0 or not parm in lines[i-1] or not "=" in lines[i-1]: lines.insert(i, "# " + parm + "=" + lineval + "\n") finish = True break if finish == False: for i in range(0, len(lines)): line = lines[i] [linekind, lineparm, lineval] = parse_line(line) if linekind == "default" and parm == lineparm: lines.insert(i+1, parm + "=" + val + "\n") break for i in range(0, len(lines)): line = lines[i] [linekind, lineparm, lineval] = parse_line(line) if linekind == "history" and parm == lineparm and val == lineval: lines.pop(i) break configFile = open(configPath[field], "w") for line in lines: configFile.write(line) configFile.close() os.environ[parm] = val return [True, ""] def clear(self, field, parm): configFile = 
open(configPath[field]) lines = configFile.readlines() configFile.close() finish = False for i in range(0, len(lines)): line = lines[i] [linekind, lineparm, lineval] = parse_line(line) if linekind == "history" and parm == lineparm: lines[i] = "" configFile = open(configPath[field], "w") for line in lines: configFile.write(line) configFile.close() return [True, ""] def add(self, field, parm, val): configFile = open(configPath[field], "a") configFile.write("\n" + "# " + parm + "=" + val + "\n" + parm + "=" + val + "\n") configFile.close() return [True, ""] def delete(self, field, parm): configFile = open(configPath[field]) lines = configFile.readlines() configFile.close() for i in range(0, len(lines)): line = lines[i] if parm in line: lines[i] = "" configFile = open(configPath[field], "w") for line in lines: configFile.write(line) configFile.close() return [True, ""] def reset_all(self, field): configFile = open(configPath[field]) lines = configFile.readlines() configFile.close() conf = {} for line in lines: [linekind, lineparm, lineval] = parse_line(line) if linekind == "default": conf[lineparm] = {"val": lineval, "default": lineval, "history": []} for line in lines: [linekind, lineparm, lineval] = parse_line(line) if linekind == "active": try: conf[lineparm]["val"] = lineval except: conf[lineparm] = {"val": lineval, "default": lineval, "history": []} for line in lines: [linekind, lineparm, lineval] = parse_line(line) if linekind == "history": conf[lineparm]["history"].append(lineval) for i in range(0, len(lines)): line = lines[i] if activePattern.match(line) != None and not "#" in line: segs = line.replace("\n", "").split("=") lines[i] = segs[0].strip() + "=" + conf[segs[0].strip()]["default"] + "\n" elif historyPattern.match(line) != None and not "==" in line: lines[i] = "" configFile = open(configPath[field], "w") for line in lines: configFile.write(line) configFile.close() return [True, ""] #sysmgr = SystemManager() #print(sysmgr.getParmList()) 
================================================ FILE: src/master/taskmgr.py ================================================ import threading import time import string import os import random, copy, subprocess import json, math from functools import wraps # must import logger after initlogging, ugly from utils.log import logger # grpc from concurrent import futures import grpc from protos.rpc_pb2 import * from protos.rpc_pb2_grpc import MasterServicer, add_MasterServicer_to_server, WorkerStub from utils.nettools import netcontrol from utils import env def ip_to_int(addr): [a, b, c, d] = addr.split('.') return (int(a)<<24) + (int(b)<<16) + (int(c)<<8) + int(d) def int_to_ip(num): return str((num>>24)&255)+"."+str((num>>16)&255)+"."+str((num>>8)&255)+"."+str(num&255) class Task(): def __init__(self, taskmgr, task_id, username, at_same_time, priority, max_size, task_infos): self.taskmgr = taskmgr self.id = task_id self.username = username self.status = WAITING self.failed_reason = "" # if all the vnodes must be started at the same time self.at_same_time = at_same_time # priority the bigger the better # self.priority the smaller the better self.priority = int(time.time()) / 60 / 60 - priority self.task_base_ip = None self.ips = None self.max_size = max_size self.gpu_preference = task_infos[0]['gpu_preference'] self.order = -1 # scheduling order of the task self.subtask_list = [SubTask( idx = index, root_task = self, vnode_info = task_info['vnode_info'], command_info = task_info['command_info'], max_retry_count = task_info['max_retry_count'], gpu_preference = task_info['gpu_preference'] ) for (index, task_info) in enumerate(task_infos)] def get_billing(self): billing_beans = 0 running_time = 0 cpu_price = 1 / 3600.0 # /core*s mem_price = 1 / 3600.0 # /GB*s disk_price = 1 / 3600.0 # /GB*s gpu_price = 100 / 3600.0 # /core*s for subtask in self.subtask_list: tmp_time = subtask.running_time cpu_beans = subtask.vnode_info.vnode.instance.cpu * tmp_time * cpu_price mem_beans 
= subtask.vnode_info.vnode.instance.memory / 1024.0 * tmp_time * mem_price disk_beans = subtask.vnode_info.vnode.instance.disk / 1024.0 * tmp_time * disk_price worker_info = self.taskmgr.get_worker_resource_info(subtask.worker) worker_gpu_price = worker_info['gpu_price'] / 3600.0 gpu_beans = subtask.vnode_info.vnode.instance.gpu * tmp_time * gpu_price logger.info("subtask:%s running_time=%f beans for: cpu=%f mem_beans=%f disk_beans=%f gpu_beans=%f" %(self.id, tmp_time, cpu_beans, mem_beans, disk_beans, gpu_beans )) beans = math.ceil(cpu_beans + mem_beans + disk_beans + gpu_beans) running_time += tmp_time billing_beans += beans return running_time, billing_beans def __lt__(self, other): return self.priority < other.priority def gen_ips_from_base(self,base_ip): if self.task_base_ip == None: return self.ips = [] for i in range(self.max_size): self.ips.append(int_to_ip(base_ip + self.task_base_ip + i + 2)) def gen_hosts(self): username = self.username taskid = self.id logger.info("Generate hosts for user(%s) task(%s) base_ip(%s)"%(username,taskid,str(self.task_base_ip))) fspath = env.getenv('FS_PREFIX') if not os.path.isdir("%s/global/users/%s" % (fspath,username)): path = env.getenv('DOCKLET_LIB') subprocess.call([path+"/master/userinit.sh", username]) logger.info("user %s directory not found, create it" % username) hosts_file = open("%s/global/users/%s/hosts/%s.hosts" % (fspath,username,"batch-"+taskid),"w") hosts_file.write("127.0.0.1 localhost\n") i = 0 for ip in self.ips: hosts_file.write(ip+" batch-"+str(i)+"\n") i += 1 hosts_file.close() class SubTask(): def __init__(self, idx, root_task, vnode_info, command_info, max_retry_count, gpu_preference): self.root_task = root_task self.vnode_info = vnode_info self.vnode_info.vnodeid = idx self.command_info = command_info if self.command_info != None: self.command_info.vnodeid = idx self.max_retry_count = max_retry_count self.gpu_preference = gpu_preference self.vnode_started = False self.task_started = False 
self.start_at = 0 self.end_at = 0 self.running_time = 0 self.status = WAITING self.status_reason = '' self.try_count = 0 self.worker = None self.lock = threading.Lock() def waiting_for_retry(self,reason=""): self.try_count += 1 self.status = WAITING if self.try_count <= self.max_retry_count else FAILED if self.status == FAILED: self.root_task.status = FAILED self.failed_reason = reason self.root_task.failed_reason = reason class TaskReporter(MasterServicer): def __init__(self, taskmgr): self.taskmgr = taskmgr def report(self, request, context): for task_report in request.taskmsgs: self.taskmgr.on_task_report(task_report) return Reply(status=Reply.ACCEPTED, message='') class TaskMgr(threading.Thread): # load task information from etcd # initial a task queue and task schedueler # taskmgr: a taskmgr instance def __init__(self, nodemgr, monitor_fetcher, master_ip, scheduler_interval=2, external_logger=None): threading.Thread.__init__(self) self.thread_stop = False self.jobmgr = None self.master_ip = master_ip self.task_queue = [] self.lazy_append_list = [] self.lazy_delete_list = [] self.lazy_stop_list = [] self.task_queue_lock = threading.Lock() self.stop_lock = threading.Lock() self.add_lock = threading.Lock() #self.user_containers = {} self.scheduler_interval = scheduler_interval self.logger = logger self.master_port = env.getenv('BATCH_MASTER_PORT') self.worker_port = env.getenv('BATCH_WORKER_PORT') # nodes self.nodemgr = nodemgr self.monitor_fetcher = monitor_fetcher self.cpu_usage = {} self.gpu_usage = {} # self.all_nodes = None # self.last_nodes_info_update_time = 0 # self.nodes_info_update_interval = 30 # (s) self.gpu_pending_tasks = {} self.network_lock = threading.Lock() batch_net = env.getenv('BATCH_NET') self.batch_cidr = int(batch_net.split('/')[1]) batch_net = batch_net.split('/')[0] task_cidr = int(env.getenv('BATCH_TASK_CIDR')) task_cidr = min(task_cidr,31-self.batch_cidr) self.task_cidr = max(task_cidr,2) self.base_ip = ip_to_int(batch_net) 
self.free_nets = [] for i in range(0, (1 << (32-self.batch_cidr)) - 1, (1 << self.task_cidr)): self.free_nets.append(i) #self.logger.info("Free nets addresses pool %s" % str(self.free_nets)) self.logger.info("Each Batch Net CIDR:%s"%(str(self.task_cidr))) def data_lock(lockname): def lock(f): @wraps(f) def new_f(self, *args, **kwargs): lockobj = getattr(self,lockname) lockobj.acquire() try: result = f(self, *args, **kwargs) except Exception as err: lockobj.release() raise err lockobj.release() return result return new_f return lock def subtask_lock(f): @wraps(f) def new_f(self, subtask, *args, **kwargs): subtask.lock.acquire() try: result = f(self, subtask, *args, **kwargs) except Exception as err: subtask.lock.release() raise err subtask.lock.release() return result return new_f def run(self): self.serve() while not self.thread_stop: self.sort_out_task_queue() task, sub_task_list = self.task_scheduler() if task is not None and sub_task_list is not None: self.task_processor(task, sub_task_list) else: time.sleep(self.scheduler_interval) def serve(self): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) add_MasterServicer_to_server(TaskReporter(self), self.server) self.server.add_insecure_port('[::]:' + self.master_port) self.server.start() self.logger.info('[taskmgr_rpc] start rpc server') def stop(self): self.thread_stop = True self.server.stop(0) self.logger.info('[taskmgr_rpc] stop rpc server') @data_lock('task_queue_lock') @data_lock('add_lock') @data_lock('stop_lock') def sort_out_task_queue(self): for task in self.task_queue: if task.id in self.lazy_stop_list: self.stop_remove_task(task) self.lazy_delete_list.append(task) running_time, billing = task.get_billing() self.logger.info('task %s stopped, running_time:%s billing:%d'%(task.id, str(running_time), billing)) running_time = math.ceil(running_time) self.jobmgr.report(task.username, task.id,'stopped',running_time=running_time,billing=billing) while self.lazy_delete_list: task = 
self.lazy_delete_list.pop(0) try: self.task_queue.remove(task) except Exception as err: self.logger.warning(str(err)) new_append_list = [] for task in self.lazy_append_list: if task.id in self.lazy_stop_list: self.jobmgr.report(task.username, task.id, 'stopped') else: new_append_list.append(task) self.lazy_append_list = new_append_list self.lazy_stop_list.clear() if self.lazy_append_list: self.task_queue.extend(self.lazy_append_list) self.lazy_append_list.clear() self.task_queue = sorted(self.task_queue, key=lambda x: x.priority) self.gpu_pending_tasks = {} no_pref_task_counts = 0 for task in self.task_queue: if task.gpu_preference == 'null': task.order = no_pref_task_counts no_pref_task_counts += 1 else: if task.gpu_preference not in self.gpu_pending_tasks: self.gpu_pending_tasks[task.gpu_preference] = 0 task.order = no_pref_task_counts + self.gpu_pending_tasks[task.gpu_preference] self.gpu_pending_tasks[task.gpu_preference] += 1 self.gpu_pending_tasks['null'] = no_pref_task_counts def start_vnode(self, subtask): try: self.logger.info('[task_processor] Starting vnode for task [%s] vnode [%d]' % (subtask.vnode_info.taskid, subtask.vnode_info.vnodeid)) channel = grpc.insecure_channel('%s:%s' % (subtask.worker, self.worker_port)) stub = WorkerStub(channel) response = stub.start_vnode(subtask.vnode_info) if response.status != Reply.ACCEPTED: raise Exception(response.message) except Exception as e: self.logger.error('[task_processor] rpc error message: %s' % e) subtask.status_reason = str(e) return [False, e] subtask.vnode_started = True subtask.start_at = time.time() self.cpu_usage[subtask.worker] += subtask.vnode_info.vnode.instance.cpu self.gpu_usage[subtask.worker] += subtask.vnode_info.vnode.instance.gpu return [True, ''] @subtask_lock def stop_vnode(self, subtask): if not subtask.vnode_started: return [True, ""] try: self.logger.info('[task_processor] Stopping vnode for task [%s] vnode [%d]' % (subtask.vnode_info.taskid, subtask.vnode_info.vnodeid)) channel = 
grpc.insecure_channel('%s:%s' % (subtask.worker, self.worker_port)) stub = WorkerStub(channel) response = stub.stop_vnode(subtask.vnode_info) if response.status != Reply.ACCEPTED: raise Exception(response.message) except Exception as e: self.logger.error('[task_processor] rpc error message: %s' % e) subtask.status_reason = str(e) return [False, e] subtask.vnode_started = False subtask.end_at = time.time() subtask.running_time += subtask.end_at - subtask.start_at self.cpu_usage[subtask.worker] -= subtask.vnode_info.vnode.instance.cpu self.gpu_usage[subtask.worker] -= subtask.vnode_info.vnode.instance.gpu return [True, ''] def start_subtask(self, subtask): try: self.logger.info('[task_processor] Starting task [%s] vnode [%d]' % (subtask.vnode_info.taskid, subtask.vnode_info.vnodeid)) channel = grpc.insecure_channel('%s:%s' % (subtask.worker, self.worker_port)) stub = WorkerStub(channel) response = stub.start_task(subtask.command_info) if response.status != Reply.ACCEPTED: raise Exception(response.message) except Exception as e: self.logger.error('[task_processor] rpc error message: %s' % e) subtask.status_reason = str(e) return [False, e] subtask.task_started = True return [True, ''] def stop_subtask(self, subtask): try: self.logger.info('[task_processor] Stopping task [%s] vnode [%d]' % (subtask.vnode_info.taskid, subtask.vnode_info.vnodeid)) channel = grpc.insecure_channel('%s:%s' % (subtask.worker, self.worker_port)) stub = WorkerStub(channel) response = stub.stop_task(subtask.command_info) if response.status != Reply.ACCEPTED: raise Exception(response.message) except Exception as e: self.logger.error('[task_processor] rpc error message: %s' % e) subtask.status = FAILED subtask.status_reason = str(e) return [False, e] subtask.task_started = False return [True, ''] @data_lock('network_lock') def acquire_task_ips(self, task): self.logger.info("[acquire_task_ips] user(%s) task(%s) net(%s)" % (task.username, task.id, str(task.task_base_ip))) if task.task_base_ip == 
None: task.task_base_ip = self.free_nets.pop(0) return task.task_base_ip @data_lock('network_lock') def release_task_ips(self, task): self.logger.info("[release_task_ips] user(%s) task(%s) net(%s)" % (task.username, task.id, str(task.task_base_ip))) if task.task_base_ip == None: return self.free_nets.append(task.task_base_ip) task.task_base_ip = None #self.logger.error('[release task_net] %s' % str(e)) def setup_tasknet(self, task, workers=None): taskid = task.id username = task.username brname = "docklet-batch-%s-%s"%(username, taskid) gwname = taskid if task.task_base_ip == None: return [False, "task.task_base_ip is None!"] gatewayip = int_to_ip(self.base_ip + task.task_base_ip + 1) gatewayipcidr = gatewayip + "/" + str(32-self.task_cidr) netcontrol.new_bridge(brname) netcontrol.setup_gw(brname,gwname,gatewayipcidr,0,0) for wip in workers: if wip != self.master_ip: netcontrol.setup_gre(brname,wip) return [True, gatewayip] def remove_tasknet(self, task): taskid = task.id username = task.username brname = "docklet-batch-%s-%s"%(username, taskid) netcontrol.del_bridge(brname) def task_processor(self, task, sub_task_list): task.status = RUNNING self.jobmgr.report(task.username, task.id, 'running') # properties for transactio self.acquire_task_ips(task) task.gen_ips_from_base(self.base_ip) task.gen_hosts() #need to create hosts [success, gwip] = self.setup_tasknet(task, [sub_task.worker for sub_task in sub_task_list]) if not success: self.release_task_ips(task) return [False, gwip] placed_workers = [] start_all_vnode_success = True # start vc for sub_task in sub_task_list: vnode_info = sub_task.vnode_info vnode_info.vnode.hostname = "batch-" + str(vnode_info.vnodeid % task.max_size) if sub_task.vnode_started: continue username = sub_task.root_task.username #container_name = task.info.username + '-batch-' + task.info.id + '-' + str(instance_id) + '-' + task.info.token #if not username in self.user_containers.keys(): #self.user_containers[username] = [] 
#self.user_containers[username].append(container_name) ipaddr = task.ips[vnode_info.vnodeid % task.max_size] + "/" + str(32-self.task_cidr) brname = "docklet-batch-%s-%s" % (username, sub_task.root_task.id) networkinfo = Network(ipaddr=ipaddr, gateway=gwip, masterip=self.master_ip, brname=brname) vnode_info.vnode.network.CopyFrom(networkinfo) placed_workers.append(sub_task.worker) [success, msg] = self.start_vnode(sub_task) if not success: sub_task.waiting_for_retry("Fail to start vnode.") if sub_task.status == WAITING: self.jobmgr.report(task.username, task.id, 'retrying', "Fail to start vnode.") sub_task.worker = None start_all_vnode_success = False if not start_all_vnode_success: return # start tasks for sub_task in sub_task_list: task_info = sub_task.command_info if task_info is None or sub_task.status == RUNNING: sub_task.status = RUNNING continue task_info.token = ''.join(random.sample(string.ascii_letters + string.digits, 8)) [success, msg] = self.start_subtask(sub_task) if success: sub_task.status = RUNNING else: sub_task.waiting_for_retry("Fail to start task.") if sub_task.status == WAITING: self.jobmgr.report(task.username, task.id, 'retrying', "Fail to start task.") def clear_sub_tasks(self, sub_task_list): for sub_task in sub_task_list: self.clear_sub_task(sub_task) def clear_sub_task(self, sub_task): if sub_task.task_started: self.stop_subtask(sub_task) #pass if sub_task.vnode_started: self.stop_vnode(sub_task) #pass @data_lock('stop_lock') def lazy_stop_task(self, taskid): self.lazy_stop_list.append(taskid) def stop_remove_task(self, task): if task is None: return self.logger.info("[taskmgr] stop and remove task(%s)"%task.id) self.clear_sub_tasks(task.subtask_list) self.release_task_ips(task) self.remove_tasknet(task) def check_task_completed(self, task): if task.status == RUNNING or task.status == WAITING: for sub_task in task.subtask_list: if sub_task.command_info != None and (sub_task.status == RUNNING or sub_task.status == WAITING): return False 
self.logger.info('task %s finished, status %d, subtasks: %s' % (task.id, task.status, str([sub_task.status for sub_task in task.subtask_list]))) self.stop_remove_task(task) self.lazy_delete_list.append(task) running_time, billing = task.get_billing() self.logger.info('task %s running_time:%s billing:%d'%(task.id, str(running_time), billing)) running_time = math.ceil(running_time) if task.status == FAILED: self.jobmgr.report(task.username,task.id,"failed",task.failed_reason,task.subtask_list[0].max_retry_count+1, running_time, billing) else: self.jobmgr.report(task.username,task.id,'finished',running_time=running_time,billing=billing) return True # this method is called when worker send heart-beat rpc request def on_task_report(self, report): self.logger.info('[on_task_report] receive task report: id %s-%d, status %d' % (report.taskid, report.vnodeid, report.subTaskStatus)) task = self.get_task(report.taskid) if task == None: self.logger.error('[on_task_report] task not found') return sub_task = task.subtask_list[report.vnodeid] if sub_task.command_info.token != report.token: self.logger.warning('[on_task_report] wrong token, %s %s' % (sub_task.command_info.token, report.token)) return username = task.username # container_name = username + '-batch-' + task.info.id + '-' + str(report.instanceid) + '-' + report.token # self.user_containers[username].remove(container_name) if sub_task.status != RUNNING: self.logger.error('[on_task_report] receive task report when vnode is not running') #sub_task.status = report.subTaskStatus sub_task.status_reason = report.errmsg sub_task.task_started = False if report.subTaskStatus == FAILED or report.subTaskStatus == TIMEOUT: self.clear_sub_task(sub_task) sub_task.waiting_for_retry(report.errmsg) self.logger.info('task %s report failed, status %d, subtasks: %s' % (task.id, task.status, str([sub_task.status for sub_task in task.subtask_list]))) if sub_task.status == WAITING: self.jobmgr.report(task.username, task.id, 'retrying', 
report.errmsg) elif report.subTaskStatus == OUTPUTERROR: self.clear_sub_task(sub_task) sub_task.status = FAILED task.status = FAILED task.failed_reason = report.errmsg elif report.subTaskStatus == COMPLETED: sub_task.status = report.subTaskStatus self.clear_sub_task(sub_task) # return task, workers def task_scheduler(self): # simple FIFO with priority self.logger.info('[task_scheduler] scheduling... (%d tasks remains)' % len(self.task_queue)) gpu_has_pending_task = set() for task in self.task_queue: if task in self.lazy_delete_list or task.id in self.lazy_stop_list: continue self.logger.info('task %s sub_tasks %s' % (task.id, str([sub_task.status for sub_task in task.subtask_list]))) if self.check_task_completed(task): continue self.logger.info('schedule task %s sub_tasks %s' % (task.id, str([sub_task.status for sub_task in task.subtask_list]))) if task.at_same_time: # parallel tasks if not self.has_waiting(task.subtask_list): continue # 如果偏好的gpu类型前面已经有其他任务在等待了,就直接跳过调度 if task.gpu_preference in gpu_has_pending_task: continue workers = self.find_proper_workers(task.subtask_list) if len(workers) == 0: # 如果找不到合适的节点,且存在gpu偏好,则允许不需要该gpu的节点先行调度 if task.gpu_preference is not None and task.gpu_preference != 'null': gpu_has_pending_task.add(task.gpu_preference) continue return None, None else: for i in range(len(workers)): task.subtask_list[i].worker = workers[i] return task, task.subtask_list else: # traditional tasks has_waiting = False for sub_task in task.subtask_list: if sub_task.status == WAITING: has_waiting = True # 如果偏好的gpu类型前面已经有其他任务在等待了,就直接跳过调度 if sub_task.gpu_preference in gpu_has_pending_task: continue workers = self.find_proper_workers([sub_task]) if len(workers) > 0: sub_task.worker = workers[0] return task, [sub_task] if has_waiting: # 如果找不到合适的节点,且存在gpu偏好,则允许不需要该gpu的节点先行调度 if task.gpu_preference is not None and task.gpu_preference != 'null': gpu_has_pending_task.add(task.gpu_preference) continue return None, None return None, None def has_waiting(self, 
sub_task_list): for sub_task in sub_task_list: if sub_task.status == WAITING: return True return False def find_proper_workers(self, sub_task_list, all_res=False): nodes = self.get_all_nodes() if nodes is None or len(nodes) == 0: self.logger.warning('[task_scheduler] running nodes not found') return None proper_workers = [] has_waiting = False for sub_task in sub_task_list: if sub_task.status == WAITING: has_waiting = True if sub_task.worker is not None and sub_task.vnode_started: proper_workers.append(sub_task.worker) continue needs = sub_task.vnode_info.vnode.instance self.logger.info('sub_task %s-%d' %(sub_task.root_task.id, sub_task.vnode_info.vnodeid)) self.logger.info(str(needs)) #logger.info(needs) proper_worker = None for worker_ip, worker_info in nodes: self.logger.info('worker ip' + worker_ip) self.logger.info('cpu usage: ' + str(self.get_cpu_usage(worker_ip))) self.logger.info('gpu usage: ' + str(self.get_gpu_usage(worker_ip))) self.logger.info('worker_info: ' + str(worker_info)) #logger.info(worker_info) #logger.info(self.get_cpu_usage(worker_ip)) if needs.gpu > 0 and sub_task.gpu_preference is not None and sub_task.gpu_preference != 'null' and sub_task.gpu_preference != worker_info['gpu_name']: continue if needs.cpu + (not all_res) * self.get_cpu_usage(worker_ip) > worker_info['cpu']: continue elif needs.memory > worker_info['memory']: continue elif needs.disk > worker_info['disk']: continue # try not to assign non-gpu task to a worker with gpu #if needs['gpu'] == 0 and worker_info['gpu'] > 0: #continue elif needs.gpu + (not all_res) * self.get_gpu_usage(worker_ip) > worker_info['gpu']: continue else: worker_info['cpu'] -= needs.cpu worker_info['memory'] -= needs.memory worker_info['gpu'] -= needs.gpu worker_info['disk'] -= needs.disk proper_worker = worker_ip break if proper_worker is not None: proper_workers.append(proper_worker) else: return [] if has_waiting: return proper_workers else: return [] def get_all_nodes(self): # cache running nodes # if 
self.all_nodes is not None and time.time() - self.last_nodes_info_update_time < self.nodes_info_update_interval: # return self.all_nodes # get running nodes node_ips = self.nodemgr.get_batch_nodeips() all_nodes = [(node_ip, self.get_worker_resource_info(node_ip)) for node_ip in node_ips] return all_nodes def is_alive(self, worker): nodes = self.nodemgr.get_batch_nodeips() return worker in nodes def get_worker_resource_info(self, worker_ip): fetcher = self.monitor_fetcher(worker_ip) worker_info = fetcher.info info = {} info['cpu'] = len(worker_info['cpuconfig']) info['memory'] = (worker_info['meminfo']['buffers'] + worker_info['meminfo']['cached'] + worker_info['meminfo']['free']) / 1024 # (Mb) info['disk'] = sum([disk['free'] for disk in worker_info['diskinfo']]) / 1024 / 1024 # (Mb) info['gpu'] = len(worker_info['gpuinfo']) info['gpu_name'] = worker_info['gpuinfo'][0]['name'] if len(worker_info['gpuinfo']) > 0 else '' info['gpu_price'] = worker_info['gpuinfo'][0]['price'] if len(worker_info['gpuinfo']) > 0 else 0 return info def get_cpu_usage(self, worker_ip): try: return self.cpu_usage[worker_ip] except: self.cpu_usage[worker_ip] = 0 return 0 def get_gpu_usage(self, worker_ip): try: return self.gpu_usage[worker_ip] except: self.gpu_usage[worker_ip] = 0 return 0 # save the task information into database # called when jobmgr assign task to taskmgr @data_lock('add_lock') def add_task(self, username, taskid, json_task, task_priority=1): # decode json string to object defined in grpc self.logger.info('[taskmgr add_task] receive task %s' % taskid) image_dict = { "private": Image.PRIVATE, "base": Image.BASE, "public": Image.PUBLIC } max_size = (1 << self.task_cidr) - 2 if int(json_task['vnodeCount']) > max_size: # tell jobmgr self.jobmgr.report(username,taskid,"failed","vnodeCount exceed limits.") return False task = Task( taskmgr = self, task_id = taskid, username = username, # all vnode must be started at the same time at_same_time = 'atSameTime' in json_task.keys(), 
priority = task_priority, max_size = (1 << self.task_cidr) - 2, task_infos = [{ 'gpu_preference': json_task['gpuPreference'] if int(json_task['gpuSetting']) > 0 else 'null', 'max_retry_count': int(json_task['retryCount']), 'vnode_info': VNodeInfo( taskid = taskid, username = username, vnode = VNode( image = Image( name = '_'.join(json_task['image'].split('_')[:-2]), #json_task['cluster']['image']['name'], type = image_dict[json_task['image'].split('_')[-1]], #json_task['cluster']['image']['type'], owner = username if not json_task['image'].split('_')[-2] else json_task['image'].split('_')[-2]), #json_task['cluster']['image']['owner']), instance = Instance( cpu = int(json_task['cpuSetting']), memory = int(json_task['memorySetting']), disk = int(json_task['diskSetting']), gpu = int(json_task['gpuSetting'])), mount = [Mount( provider = json_task['mapping'][mapping_key]['mappingProvider'], localPath = json_task['mapping'][mapping_key]['mappingMountpath'], remotePath = json_task['mapping'][mapping_key]['mappingBucketName'], accessKey = json_task['mapping'][mapping_key]['mappingAccessKey'], secretKey = json_task['mapping'][mapping_key]['mappingSecretKey'], other = json_task['mapping'][mapping_key]['mappingEndpoint'] ) for mapping_key in json_task['mapping']] if 'mapping' in json_task else [] ), ), 'command_info': TaskInfo( taskid = taskid, username = username, parameters = Parameters( command = Command( commandLine = json_task['command'], packagePath = json_task['srcAddr'], envVars = {}), stderrRedirectPath = json_task.get('stdErrRedPth',""), stdoutRedirectPath = json_task.get('stdOutRedPth',"")), timeout = int(json_task['expTime']) # commands are executed in all vnodes / only excuted in the first vnode # if in traditional mode, commands will be executed in all vnodes ) if (json_task['runon'] == 'all' or vnode_index == 0) else None } for vnode_index in range(int(json_task['vnodeCount']))]) if task.at_same_time: workers = self.find_proper_workers(task.subtask_list, 
all_res=True) if len(workers) == 0: task.status = FAILED # tell jobmgr self.jobmgr.report(username,taskid,"failed","Resources needs exceed limits") return False else: for sub_task in task.subtask_list: workers = self.find_proper_workers([sub_task], all_res=True) if len(workers) == 0: task.status = FAILED # tell jobmgr self.jobmgr.report(username,taskid,"failed","Resources needs exceed limits") return False self.lazy_append_list.append(task) return True @data_lock('task_queue_lock') def get_task_list(self): return self.task_queue.copy() @data_lock('task_queue_lock') def get_pending_gpu_tasks_info(self): return self.gpu_pending_tasks def get_task_order(self, taskid): task = self.get_task(taskid) if task is not None: return task.order return -1 @data_lock('task_queue_lock') def get_task(self, taskid): for task in self.task_queue: if task.id == taskid: return task return None def set_jobmgr(self, jobmgr): self.jobmgr = jobmgr # get names of all the batch containers of the user def get_user_batch_containers(self,username): return [] # if not username in self.user_containers.keys(): # return [] # else: # return self.user_containers[username] ================================================ FILE: src/master/testTaskCtrler.py ================================================ import sys if sys.path[0].endswith("master"): sys.path[0] = sys.path[0][:-6] import grpc,time from protos import rpc_pb2, rpc_pb2_grpc import random, string def run(): channel = grpc.insecure_channel('localhost:50051') stub = rpc_pb2_grpc.WorkerStub(channel) comm = rpc_pb2.Command(commandLine="ls /root;sleep 5;ls /root", packagePath="/root", envVars={'test1':'10','test2':'20'}) # | awk '{print \"test\\\"\\n\"}' paras = rpc_pb2.Parameters(command=comm, stderrRedirectPath="/root/nfs/batch_{jobid}/", stdoutRedirectPath="/root/nfs/batch_{jobid}/") img = rpc_pb2.Image(name="base", type=rpc_pb2.Image.BASE, owner="docklet") inst = rpc_pb2.Instance(cpu=1, memory=1000, disk=1000, gpu=0) mnt = 
rpc_pb2.Mount(localPath="",provider='aliyun',remotePath="test-for-docklet",other="oss-cn-beijing.aliyuncs.com",accessKey="LTAIdl7gmmIhfqA9",secretKey="") clu = rpc_pb2.Cluster(image=img, instance=inst, mount=[]) task = rpc_pb2.TaskInfo(id="test",username="root",instanceid=1,instanceCount=1,maxRetryCount=1,parameters=paras,cluster=clu,timeout=60000,token=''.join(random.sample(string.ascii_letters + string.digits, 8))) response = stub.process_task(task) print("Batch client received: " + str(response.status)+" "+response.message) def stop_task(): channel = grpc.insecure_channel('localhost:50051') stub = rpc_pb2_grpc.WorkerStub(channel) taskmsg = rpc_pb2.TaskMsg(taskid="test",username="root",instanceid=1,instanceStatus=rpc_pb2.COMPLETED,token="test",errmsg="") reportmsg = rpc_pb2.ReportMsg(taskmsgs = [taskmsg]) response = stub.stop_tasks(reportmsg) print("Batch client received: " + str(response.status)+" "+response.message) if __name__ == '__main__': #for i in range(10): run() #time.sleep(4) #stop_task() ================================================ FILE: src/master/testTaskMgr.py ================================================ import master.taskmgr from concurrent import futures import grpc from protos.rpc_pb2 import * from protos.rpc_pb2_grpc import * import threading, json, time, random from utils import env class SimulatedNodeMgr(): def get_batch_nodeips(self): return ['0.0.0.0'] class SimulatedMonitorFetcher(): def __init__(self, ip): self.info = {} self.info['cpuconfig'] = [1,1,1,1,1,1,1,1] self.info['meminfo'] = {} self.info['meminfo']['free'] = 8 * 1024 * 1024 # (kb) simulate 8 GB memory self.info['meminfo']['buffers'] = 8 * 1024 * 1024 self.info['meminfo']['cached'] = 8 * 1024 * 1024 self.info['diskinfo'] = [] self.info['diskinfo'].append({}) self.info['diskinfo'][0]['free'] = 16 * 1024 * 1024 * 1024 # (b) simulate 16 GB disk self.info['gpuinfo'] = [1,1] class SimulatedTaskController(WorkerServicer): def __init__(self, worker): self.worker = worker def 
start_vnode(self, vnodeinfo, context): print('[SimulatedTaskController] start vnode, taskid [%s] vnodeid [%d]' % (vnodeinfo.taskid, vnodeinfo.vnodeid)) return Reply(status=Reply.ACCEPTED,message="") def stop_vnode(self, vnodeinfo, context): print('[SimulatedTaskController] stop vnode, taskid [%s] vnodeid [%d]' % (vnodeinfo.taskid, vnodeinfo.vnodeid)) return Reply(status=Reply.ACCEPTED,message="") def start_task(self, taskinfo, context): print('[SimulatedTaskController] start task, taskid [%s] vnodeid [%d] token [%s]' % (taskinfo.taskid, taskinfo.vnodeid, taskinfo.token)) worker.process(taskinfo) return Reply(status=Reply.ACCEPTED,message="") def stop_task(self, taskinfo, context): print('[SimulatedTaskController] stop task, taskid [%s] vnodeid [%d] token [%s]' % (taskinfo.taskid, taskinfo.vnodeid, taskinfo.token)) return Reply(status=Reply.ACCEPTED,message="") class SimulatedWorker(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.thread_stop = False self.tasks = [] def run(self): worker_port = env.getenv('BATCH_WORKER_PORT') server = grpc.server(futures.ThreadPoolExecutor(max_workers=5)) add_WorkerServicer_to_server(SimulatedTaskController(self), server) server.add_insecure_port('[::]:' + worker_port) server.start() while not self.thread_stop: for task in self.tasks: seed = random.random() if seed < 0.25: report(task.taskid, task.vnodeid, RUNNING, task.token) elif seed < 0.5: report(task.taskid, task.vnodeid, COMPLETED, task.token) self.tasks.remove(task) break elif seed < 0.75: report(task.taskid, task.vnodeid, FAILED, task.token) self.tasks.remove(task) break else: pass time.sleep(5) server.stop(0) def stop(self): self.thread_stop = True def process(self, task): self.tasks.append(task) class SimulatedJobMgr(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.thread_stop = False def run(self): while not self.thread_stop: time.sleep(5) server.stop(0) def stop(self): self.thread_stop = True def report(self, task): 
print('[SimulatedJobMgr] task[%s] status %d' % (task.info.id, task.status)) def assignTask(self, taskmgr, taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu): task = {} task['instCount'] = instance_count task['retryCount'] = retry_count task['expTime'] = timeout task['at_same_time'] = True task['multicommand'] = True task['command'] = 'ls' task['srcAddr'] = '' task['envVars'] = {'a':'1'} task['stdErrRedPth'] = '' task['stdOutRedPth'] = '' task['image'] = 'root_root_base' task['cpuSetting'] = cpu task['memorySetting'] = memory task['diskSetting'] = disk task['gpuSetting'] = 0 task['mapping'] = [] taskmgr.add_task('root', taskid, task) class SimulatedLogger(): def info(self, msg): print('[INFO] ' + msg) def warning(self, msg): print('[WARNING] ' + msg) def error(self, msg): print('[ERROR] ' + msg) def test(): global worker global jobmgr global taskmgr worker = SimulatedWorker() worker.start() jobmgr = SimulatedJobMgr() jobmgr.start() taskmgr = master.taskmgr.TaskMgr(SimulatedNodeMgr(), SimulatedMonitorFetcher, master_ip='', scheduler_interval=2, external_logger=SimulatedLogger()) # taskmgr.set_jobmgr(jobmgr) taskmgr.start() add('task_0', instance_count=2, retry_count=2, timeout=60, cpu=2, memory=2048, disk=2048, gpu=0) def test2(): global jobmgr global taskmgr jobmgr = SimulatedJobMgr() jobmgr.start() taskmgr = master.taskmgr.TaskMgr(SimulatedNodeMgr(), SimulatedMonitorFetcher, master_ip='', scheduler_interval=2, external_logger=SimulatedLogger()) taskmgr.set_jobmgr(jobmgr) taskmgr.start() add('task_0', instance_count=2, retry_count=2, timeout=60, cpu=2, memory=2048, disk=2048, gpu=0) def add(taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu): global jobmgr global taskmgr jobmgr.assignTask(taskmgr, taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu) def report(taskid, instanceid, status, token): global taskmgr master_port = env.getenv('BATCH_MASTER_PORT') channel = grpc.insecure_channel('%s:%s' % ('0.0.0.0', 
master_port)) stub = MasterStub(channel) response = stub.report(ReportMsg(taskmsgs=[TaskMsg(taskid=taskid, username='root', vnodeid=instanceid, subTaskStatus=status, token=token)])) def stop(): global worker global jobmgr global taskmgr worker.stop() jobmgr.stop() taskmgr.stop() ================================================ FILE: src/master/testTaskWorker.py ================================================ import sys if sys.path[0].endswith("master"): sys.path[0] = sys.path[0][:-6] import grpc,time from protos import rpc_pb2, rpc_pb2_grpc import random, string def run(): channel = grpc.insecure_channel('localhost:50051') stub = rpc_pb2_grpc.WorkerStub(channel) #comm = rpc_pb2.Command(commandLine="ls /root;sleep 5;ls /root", packagePath="/root", envVars={'test1':'10','test2':'20'}) # | awk '{print \"test\\\"\\n\"}' #paras = rpc_pb2.Parameters(command=comm, stderrRedirectPath="/root/nfs/batch_{jobid}/", stdoutRedirectPath="/root/nfs/batch_{jobid}/") img = rpc_pb2.Image(name="base", type=rpc_pb2.Image.BASE, owner="docklet") inst = rpc_pb2.Instance(cpu=1, memory=1000, disk=1000, gpu=0) mnt = rpc_pb2.Mount(localPath="",provider='aliyun',remotePath="test-for-docklet",other="oss-cn-beijing.aliyuncs.com",accessKey="LTAIdl7gmmIhfqA9",secretKey="") network = rpc_pb2.Network(ipaddr="10.0.4.2/24",gateway="10.0.4.1",masterip="192.168.0.1",brname="batch-root-test") vnode = rpc_pb2.VNode(image=img, instance=inst, mount=[],network=network,hostname="batch-5") vnodeinfo = rpc_pb2.VNodeInfo(taskid="test",username="root",vnodeid=1,vnode=vnode) #task = rpc_pb2.TaskInfo(id="test",username="root",instanceid=1,instanceCount=1,maxRetryCount=1,parameters=paras,cluster=clu,timeout=60000,token=''.join(random.sample(string.ascii_letters + string.digits, 8))) response = stub.start_vnode(vnodeinfo) print("Batch client received: " + str(response.status)+" "+response.message) def stop_task(): channel = grpc.insecure_channel('localhost:50051') stub = rpc_pb2_grpc.WorkerStub(channel) taskmsg = 
rpc_pb2.TaskMsg(taskid="test",username="root",instanceid=1,instanceStatus=rpc_pb2.COMPLETED,token="test",errmsg="") reportmsg = rpc_pb2.ReportMsg(taskmsgs = [taskmsg]) response = stub.stop_tasks(reportmsg) print("Batch client received: " + str(response.status)+" "+response.message) def stop_vnode(): channel = grpc.insecure_channel('localhost:50051') stub = rpc_pb2_grpc.WorkerStub(channel) network = rpc_pb2.Network(brname="batch-root-test") vnodeinfo = rpc_pb2.VNodeInfo(taskid="test",username="root",vnodeid=1,vnode=rpc_pb2.VNode(network=network)) response = stub.stop_vnode(vnodeinfo) print("Batch client received: " + str(response.status)+" "+response.message) def start_task(): channel = grpc.insecure_channel('localhost:50051') stub = rpc_pb2_grpc.WorkerStub(channel) comm = rpc_pb2.Command(commandLine="ls /root;sleep 5;ls /root", packagePath="/root", envVars={'test1':'10','test2':'20'}) # | awk '{print \"test\\\"\\n\"}' paras = rpc_pb2.Parameters(command=comm, stderrRedirectPath="/root/nfs/batch_{jobid}/", stdoutRedirectPath="/root/nfs/batch_{jobid}/") taskinfo = rpc_pb2.TaskInfo(taskid="test",username="root",vnodeid=1,parameters=paras,timeout=20,token="test") response = stub.start_task(taskinfo) print("Batch client received: " + str(response.status)+" "+response.message) if __name__ == '__main__': #for i in range(10): #run() #start_task() stop_vnode() #time.sleep(4) #stop_task() ================================================ FILE: src/master/userManager.py ================================================ ''' userManager for Docklet provide a class for managing users and usergroups in Docklet Warning: in some early versions, "token" stand for the instance of class model.User now it stands for a string that can be parsed to get that instance. 
in all functions start with "@administration_required" or "@administration_or_self_required", "token" is the instance Original author: Liu Peidong ''' from utils.model import db, User, UserGroup, Notification, UserUsage, LoginMsg, LoginFailMsg from functools import wraps import os, subprocess, math import hashlib import pam from base64 import b64encode from utils import env from master.settings import settings import smtplib from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.header import Header from datetime import datetime, timedelta import json from utils.log import logger from utils.lvmtool import * PAM = pam.pam() fspath = env.getenv('FS_PREFIX') data_quota = env.getenv('DATA_QUOTA') data_quota_cmd = env.getenv('DATA_QUOTA_CMD') if (env.getenv('EXTERNAL_LOGIN').lower() == 'true'): from plugin import external_receive def administration_required(func): @wraps(func) def wrapper(*args, **kwargs): if ( ('cur_user' in kwargs) == False): return {"success":'false', "reason":"Cannot get cur_user"} cur_user = kwargs['cur_user'] if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root')): return func(*args, **kwargs) else: return {"success": 'false', "reason": 'Unauthorized Action'} return wrapper def administration_or_self_required(func): @wraps(func) def wrapper(*args, **kwargs): if ( (not ('cur_user' in kwargs)) or (not ('user' in kwargs))): return {"success":'false', "reason":"Cannot get cur_user or user"} cur_user = kwargs['cur_user'] user = kwargs['user'] if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root') or (cur_user.username == user.username)): return func(*args, **kwargs) else: return {"success": 'false', "reason": 'Unauthorized Action'} return wrapper def token_required(func): @wraps(func) def wrapper(*args, **kwargs): if ( ('cur_user' in kwargs) == False): return {"success":'false', "reason":"Cannot get cur_user"} return func(*args, **kwargs) return wrapper def 
send_activated_email(to_address, username): email_from_address = settings.get('EMAIL_FROM_ADDRESS') if (email_from_address in ['\'\'', '\"\"', '']): return #text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated' text = '

Dear '+ username + ':

' text += '''

      Your account in %s has been activated

      Enjoy your personal workspace in the cloud !


      Note: DO NOT reply to this email!



Docklet Team, SEI, PKU

''' % (env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL")) text += '

'+ str(datetime.now()) + '

' text += '' subject = 'Docklet account activated' msg = MIMEMultipart() textmsg = MIMEText(text,'html','utf-8') msg['Subject'] = Header(subject, 'utf-8') msg['From'] = email_from_address msg['To'] = to_address msg.attach(textmsg) s = smtplib.SMTP() s.connect() s.sendmail(email_from_address, to_address, msg.as_string()) s.close() def send_remind_activating_email(username): #admin_email_address = env.getenv('ADMIN_EMAIL_ADDRESS') nulladdr = ['\'\'', '\"\"', ''] email_from_address = settings.get('EMAIL_FROM_ADDRESS') admin_email_address = settings.get('ADMIN_EMAIL_ADDRESS') if (email_from_address in nulladdr or admin_email_address in nulladdr): return #text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated' text = '

Dear '+ 'admin' + ':

' text += '''

      An activating request for %s in %s has been sent

      Please check it !



Docklet Team, SEI, PKU

''' % (username, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL")) text += '

'+ str(datetime.utcnow()) + '

' text += '' subject = 'An activating request in Docklet has been sent' if admin_email_address[0] == '"': admins_addr = admin_email_address[1:-1].split(" ") else: admins_addr = admin_email_address.split(" ") alladdr="" for addr in admins_addr: alladdr = alladdr+addr+", " alladdr=alladdr[:-2] msg = MIMEMultipart() textmsg = MIMEText(text,'html','utf-8') msg['Subject'] = Header(subject, 'utf-8') msg['From'] = email_from_address msg['To'] = alladdr msg.attach(textmsg) s = smtplib.SMTP() s.connect() try: s.sendmail(email_from_address, admins_addr, msg.as_string()) except Exception as e: logger.error(e) s.close() class userManager: def __init__(self, username = 'root', password = None): ''' Try to create the database when there is none initialize 'root' user and 'root' & 'primary' group ''' try: User.query.all() except: db.create_all() if password == None: #set a random password password = os.urandom(16) password = b64encode(password).decode('utf-8') fsdir = env.getenv('FS_PREFIX') f = open(fsdir + '/local/generated_password.txt', 'w') f.write("User=%s\nPass=%s\n"%(username, password)) f.close() sys_admin = User(username, hashlib.sha512(password.encode('utf-8')).hexdigest()) sys_admin.status = 'normal' sys_admin.nickname = 'root' sys_admin.description = 'Root_User' sys_admin.user_group = 'root' sys_admin.auth_method = 'local' db.session.add(sys_admin) path = env.getenv('DOCKLET_LIB') subprocess.call([path+"/master/userinit.sh", username]) db.session.commit() if not os.path.exists(fspath+"/global/sys/quota"): groupfile = open(fspath+"/global/sys/quota",'w') groups = [] groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}}) groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 
'input_rate_limit':'10000', 'output_rate_limit':'10000'}}) groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}}) groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}}) groupfile.write(json.dumps(groups)) groupfile.close() if not os.path.exists(fspath+"/global/sys/quotainfo"): quotafile = open(fspath+"/global/sys/quotainfo",'w') quotas = {} quotas['default'] = 'foundation' quotas['quotainfo'] = [] quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'}) quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 4000'}) quotas['quotainfo'].append({'name':'disk', 'hint':'the disk quota, number of MB, e.g. 4000'}) quotas['quotainfo'].append({'name':'data', 'hint':'the quota of data space, number of GB, e.g. 100'}) quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can save, e.g. 10'}) quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'}) quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'}) quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'}) quotas['quotainfo'].append({'name':'input_rate_limit', 'hint':'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'}) quotas['quotainfo'].append({'name':'output_rate_limit', 'hint':'the egress speed of the network, number of kbps. 
0 means the rate are unlimited.'}) quotafile.write(json.dumps(quotas)) quotafile.close() if not os.path.exists(fspath+"/global/sys/lxc.default"): settingfile = open(fspath+"/global/sys/lxc.default", 'w') settings = {} settings['cpu'] = "2" settings["memory"] = "2000" settings["disk"] = "2000" settingfile.write(json.dumps(settings)) settingfile.close() try: UserUsage.query.all() LoginMsg.query.all() LoginFailMsg.query.all() except: db.create_all() def auth_local(self, username, password): password = hashlib.sha512(password.encode('utf-8')).hexdigest() user = User.query.filter_by(username = username).first() if (user == None): return {"success":'false', "reason": "User does not exist!"} if (user.password != password): return {"success":'false', "reason": "Wrong password!"} result = { "success": 'true', "data":{ "username" : user.username, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "group" : user.user_group, "token" : user.generate_auth_token(), } } return result def auth_pam(self, username, password): user = User.query.filter_by(username = username).first() pamresult = PAM.authenticate(username, password) if (pamresult == False or (user != None and user.auth_method != 'pam')): return {"success":'false', "reason": "Wrong password or wrong login method!"} if (user == None): newuser = self.newuser(); newuser.username = username newuser.password = "no_password" newuser.nickname = username newuser.status = "init" newuser.user_group = "primary" newuser.auth_method = "pam" self.register(user = newuser) user = User.query.filter_by(username = username).first() result = { "success": 'true', "data":{ "username" : user.username, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "group" : user.user_group, "token" : user.generate_auth_token(), } } return result def auth_external(self, form, userip=""): if (env.getenv('EXTERNAL_LOGIN') != 'True'): 
failed_result = {'success': 'false', 'reason' : 'external auth disabled'} return failed_result result = external_receive.external_auth_receive_request(form) if (result['success'] != 'True'): failed_result = {'success':'false', 'result': result} return failed_result username = result['username'] logger.info("External login success: username=%s, userip=%s" % (username, userip)) loginmsg = LoginMsg(username,userip) db.session.add(loginmsg) db.session.commit() user = User.query.filter_by(username = username).first() if (user != None and user.auth_method == result['auth_method']): result = { "success": 'true', "data":{ "username" : user.username, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "group" : user.user_group, "token" : user.generate_auth_token(), } } return result if (user != None and user.auth_method != result['auth_method']): result = {'success': 'false', 'reason': 'other kinds of account already exists'} return result #user == None , register an account for external user newuser = self.newuser(); newuser.username = result['username'] newuser.password = result['password'] newuser.avatar = result['avatar'] newuser.nickname = result['nickname'] newuser.description = result['description'] newuser.e_mail = result['e_mail'] newuser.truename = result['truename'] newuser.student_number = result['student_number'] newuser.status = result['status'] newuser.user_group = result['user_group'] newuser.auth_method = result['auth_method'] newuser.department = result['department'] newuser.tel = result['tel'] self.register(user = newuser) user = User.query.filter_by(username = username).first() result = { "success": 'true', "data":{ "username" : user.username, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "group" : user.user_group, "token" : user.generate_auth_token(), } } return result def auth(self, username, password, userip=""): ''' authenticate a 
user by username & password return a token as well as some user information ''' user = User.query.filter_by(username = username).first() failmsg = LoginFailMsg.query.filter_by(username = username).first() result = {} if failmsg == None: newfailmsg = LoginFailMsg(username) db.session.add(newfailmsg) db.session.commit() failmsg = newfailmsg elif failmsg.failcnt > 40: reason = "You have been input wrong password over 40 times. You account will be locked. Please contact administrators for help." logger.info("Login failed: userip=%s reason:%s" % (userip,reason)) return {'success':'false', 'reason':reason} elif datetime.now() < failmsg.bantime: reason = "You have been input wrong password %d times. Please try after %s." % (failmsg.failcnt, failmsg.bantime.strftime("%Y-%m-%d %H:%M:%S")) logger.info("Login failed: userip=%s reason:%s" % (userip,reason)) return {'success':'false', 'reason':reason} if (user == None or user.auth_method =='local'): result = self.auth_local(username, password) elif (user.auth_method == 'pam'): result = self.auth_pam(username, password) else: result = {'success':'false', 'reason':'auth_method error!'} if result['success'] == 'true': loginmsg = LoginMsg(result['data']['username'],userip) failmsg.failcnt = 0 db.session.add(loginmsg) db.session.commit() logger.info("Login success: username=%s, userip=%s" % (result['data']['username'], userip)) else: logger.info("Login failed: userip=%s" % (userip)) failmsg.failcnt += 1 if failmsg.failcnt == 5: failmsg.bantime = datetime.now() + timedelta(minutes=5) elif failmsg.failcnt == 10: failmsg.bantime = datetime.now() + timedelta(minutes=10) elif failmsg.failcnt == 20: failmsg.bantime = datetime.now() + timedelta(minutes=100) elif failmsg.failcnt == 30: failmsg.bantime = datetime.now() + timedelta(days=1) db.session.commit() return result def auth_token(self, token): ''' authenticate a user by a token when succeeded, return the database iterator otherwise return None ''' user = User.verify_auth_token(token) 
return user def set_nfs_quota_bygroup(self,groupname, quota): if not data_quota == "True": return users = User.query.filter_by(user_group = groupname).all() for user in users: self.set_nfs_quota(user.username, quota) def set_nfs_quota(self, username, quota): if not data_quota == "True": return nfspath = "/users/%s/data" % username try: cmd = data_quota_cmd % (nfspath,quota+"GB") sys_run(cmd.strip('"')) except Exception as e: logger.error(e) @administration_required def query(*args, **kwargs): ''' Usage: query(username = 'xxx', cur_user = token_from_auth) || query(ID = a_integer, cur_user = token_from_auth) Provide information about one user that administrators need to use ''' if ( 'ID' in kwargs): user = User.query.filter_by(id = kwargs['ID']).first() if (user == None): return {"success":False, "reason":"User does not exist"} result = { "success":'true', "data":{ "username" : user.username, "id": user.id, "password" : user.password, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "e_mail" : user.e_mail, "student_number": user.student_number, "department" : user.department, "truename" : user.truename, "tel" : user.tel, "register_date" : "%s"%(user.register_date), "group" : user.user_group, "description" : user.description, "beans" : user.beans, }, "token": user } return result if ( 'username' not in kwargs): return {"success":'false', "reason":"Cannot get 'username'"} username = kwargs['username'] user = User.query.filter_by(username = username).first() if (user == None): return {"success":'false', "reason":"User does not exist"} result = { "success": 'true', "data":{ "username" : user.username, "id": user.id, "password" : user.password, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "e_mail" : user.e_mail, "student_number": user.student_number, "department" : user.department, "truename" : user.truename, "tel" : user.tel, "register_date" : 
"%s"%(user.register_date), "group" : user.user_group, "beans" : user.beans, }, "token": user } return result @token_required def selfQuery(*args, **kwargs): ''' Usage: selfQuery(cur_user = token_from_auth) List informantion for oneself ''' user = kwargs['cur_user'] groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() group = None for one_group in groups: if one_group['name'] == user.user_group: group = one_group['quotas'] break else: for one_group in groups: if one_group['name'] == "primary": group = one_group['quotas'] break result = { "success": 'true', "data":{ "username" : user.username, "id": user.id, "password" : user.password, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "e_mail" : user.e_mail, "student_number": user.student_number, "department" : user.department, "truename" : user.truename, "tel" : user.tel, "register_date" : "%s"%(user.register_date), "group" : user.user_group, "groupinfo": group, "beans" : user.beans, "auth_method": user.auth_method, }, } return result @token_required def selfModify(*args, **kwargs): ''' Usage: selfModify(cur_user = token_from_auth, newValue = form) Modify informantion for oneself ''' form = kwargs['newValue'] name = form.get('name', None) value = form.get('value', None) if (name == None or value == None): result = {'success': 'false'} return result user = User.query.filter_by(username = kwargs['cur_user'].username).first() if (name == 'nickname'): user.nickname = value elif (name == 'description'): user.description = value elif (name == 'department'): user.department = value elif (name == 'e_mail'): user.e_mail = value elif (name == 'tel'): user.tel = value elif (name == 'password'): old_password = hashlib.sha512(form.get('old_value', '').encode('utf-8')).hexdigest() if (user.password != old_password): result = {'success': 'false'} return result user.password = 
hashlib.sha512(value.encode('utf-8')).hexdigest() else: result = {'success': 'false'} return result db.session.commit() result = {'success': 'true'} return result @token_required def usageQuery(self, *args, **kwargs): ''' Usage: usageQuery(cur_user = token_from_auth) Query the quota and usage of user ''' cur_user = kwargs['cur_user'] groupname = cur_user.user_group groupinfo = self.groupQuery(name = groupname)['data'] usage = UserUsage.query.filter_by(username = cur_user.username).first() if usage == None: new_usage = UserUsage(cur_user.username) db.session.add(new_usage) db.session.commit() usageinfo = { 'username': cur_user.username, 'cpu': '0', 'memory': '0', 'disk': '0' } else: usageinfo = { 'username': usage.username, 'cpu': usage.cpu, 'memory': usage.memory, 'disk': usage.disk } settingfile = open(fspath+"/global/sys/lxc.default" , 'r') defaultsetting = json.loads(settingfile.read()) settingfile.close() return {'success': 'true', 'quota' : groupinfo, 'usage' : usageinfo, 'default': defaultsetting } @token_required def usageInc(self, *args, **kwargs): ''' Usage: usageModify(cur_user = token_from_auth, modification = data_from_form) Modify the usage info of user ''' cur_user = kwargs['cur_user'] modification = kwargs['modification'] logger.info("record usage for user:%s" % cur_user.username) groupname = cur_user.user_group groupinfo = self.groupQuery(name = groupname)['data'] usage = UserUsage.query.filter_by(username = cur_user.username).first() if usage == None: new_usage = UserUsage(cur_user.username) db.session.add(new_usage) db.session.commit() usage = UserUsage.query.filter_by(username = cur_user.username).first() if int(modification['cpu']) <= 0 or int(modification['memory']) <= 0 or int(modification['disk']) <= 0: return {'success':False, 'result':"cpu,memory and disk setting cannot less than zero"} cpu = int(usage.cpu) + int(modification['cpu']) memory = int(usage.memory) + int(modification['memory']) disk = int(usage.disk) + int(modification['disk']) 
if cpu > int(groupinfo['cpu']): logger.error("cpu quota exceed, user:%s" % cur_user.username) return {'success':False, 'result':"cpu quota exceed"} if memory > int(groupinfo['memory']): logger.error("memory quota exceed, user:%s" % cur_user.username) return {'success':False, 'result':"memory quota exceed"} if disk > int(groupinfo['disk']): logger.error("disk quota exceed, user:%s" % cur_user.username) return {'success':False, 'result':"disk quota exceed"} usage.cpu = str(cpu) usage.memory = str(memory) usage.disk = str(disk) db.session.commit() return {'success':True, 'result':"distribute the resource"} @token_required def usageRecover(self, *args, **kwargs): ''' Usage: usageModify(cur_user = token_from_auth, modification = data_from_form) Recover the usage info when create container failed ''' cur_user = kwargs['cur_user'] modification = kwargs['modification'] logger.info("recover usage for user:%s" % cur_user.username) usage = UserUsage.query.filter_by(username = cur_user.username).first() if usage == None: new_usage = UserUsage(cur_user.username) db.session.add(new_usage) db.session.commit() usage = UserUsage.query.filter_by(username = cur_user.username).first() return True cpu = int(usage.cpu) - int(modification['cpu']) memory = int(usage.memory) - int(modification['memory']) disk = int(usage.disk) - int(modification['disk']) if cpu < 0: cpu = 0 if memory < 0: memory = 0 if disk < 0: disk = 0 usage.cpu = str(cpu) usage.memory = str(memory) usage.disk = str(disk) db.session.commit() return {'success':True} @token_required def usageRelease(self, *args, **kwargs): cur_user = kwargs['cur_user'] cpu = kwargs['cpu'] memory = kwargs['memory'] disk = kwargs['disk'] usage = UserUsage.query.filter_by(username = cur_user.username).first() if usage == None: new_usage = UserUsage(cur_user.username) db.session.add(new_usage) db.session.commit() return {'success':True} nowcpu = int(usage.cpu) - int(cpu) nowmemory = int(usage.memory) - int(memory) nowdisk = int(usage.disk) - 
int(disk) if nowcpu < 0: nowcpu = 0 if nowmemory < 0: nowmemory = 0 if nowdisk < 0: nowdisk = 0 usage.cpu = str(nowcpu) usage.memory = str(nowmemory) usage.disk = str(nowdisk) db.session.commit() return {'success':True} def initUsage(*args, **kwargs): """ init the usage info when start docklet with init mode """ usages = UserUsage.query.all() for usage in usages: usage.cpu = "0" usage.memory = "0" usage.disk = "0" db.session.commit() return True @administration_required def userList(*args, **kwargs): ''' Usage: list(cur_user = token_from_auth) List all users for an administrator ''' alluser = User.query.all() result = { "success": 'true', "data":[] } for user in alluser: userinfo = [ user.id, user.username, user.truename, user.e_mail, user.tel, "%s"%(user.register_date), user.status, user.user_group, user.beans, '', ] result["data"].append(userinfo) return result @administration_required def groupList(*args, **kwargs): ''' Usage: list(cur_user = token_from_auth) List all groups for an administrator ''' groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() quotafile = open(fspath+"/global/sys/quotainfo",'r') quotas = json.loads(quotafile.read()) quotafile.close() result = { "success": 'true', "groups": groups, "quotas": quotas['quotainfo'], "default": quotas['default'], } return result @administration_required def change_default_group(*args, **kwargs): form = kwargs['form'] default_group = form.get('defaultgroup') quotafile = open(fspath+"/global/sys/quotainfo",'r') quotas = json.loads(quotafile.read()) quotafile.close() quotas['default'] = default_group quotafile = open(fspath+"/global/sys/quotainfo",'w') quotafile.write(json.dumps(quotas)) quotafile.close() return { 'success':'true', 'action':'change default group' } def groupQuery(self, *args, **kwargs): ''' Usage: groupQuery(name = XXX, cur_user = token_from_auth) List a group for an administrator ''' groupfile = open(fspath+"/global/sys/quota",'r') groups = 
json.loads(groupfile.read()) groupfile.close() for group in groups: if group['name'] == kwargs['name']: result = { "success":'true', "data": group['quotas'], } return result else: return {"success":False, "reason":"Group does not exist"} @administration_required def groupListName(*args, **kwargs): ''' Usage: grouplist(cur_user = token_from_auth) List all group names for an administrator ''' groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() result = { "groups": [], } for group in groups: result["groups"].append(group['name']) return result @administration_required def groupModify(self, *args, **kwargs): ''' Usage: groupModify(newValue = dict_from_form, cur_user = token_from_auth) ''' groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() for group in groups: if group['name'] == kwargs['newValue'].get('groupname',None): form = kwargs['newValue'] for key in form.keys(): if key == "data": if not group['quotas'][key] == form.get(key): self.set_nfs_quota_bygroup(group['name'],form.get(key)) else: pass if key == "groupname" or key == "token": pass else: if key == "vnode": vnode = int(form['vnode']) val = str(2**(round(math.log(vnode+3, 2))) - 3 ) group["quotas"][key] = val else: group['quotas'][key] = form.get(key) groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() return {"success":'true'} else: return {"success":'false', "reason":"UserGroup does not exist"} @administration_required def modify(self, *args, **kwargs): ''' modify a user's information in database will send an e-mail when status is changed from 'applying' to 'normal' Usage: modify(newValue = dict_from_form, cur_user = token_from_auth) ''' if ( kwargs['newValue'].get('Instruction', '') == 'Activate'): user_modify = User.query.filter_by(id = kwargs['newValue'].get('ID', None)).first() user_modify.status = 'normal' send_activated_email(user_modify.e_mail, 
user_modify.username) db.session.commit() return {"success": "true"} if ( kwargs['newValue'].get('password', '') != ''): user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first() new_password = kwargs['newValue'].get('password','') new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest() user_modify.password = new_password db.session.commit() return {"success": "true"} user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first() if (user_modify == None): return {"success":'false', "reason":"User does not exist"} #try: form = kwargs['newValue'] user_modify.truename = form.get('truename', '') user_modify.e_mail = form.get('e_mail', '') user_modify.department = form.get('department', '') user_modify.student_number = form.get('student_number', '') user_modify.tel = form.get('tel', '') user_modify.user_group = form.get('group', '') user_modify.auth_method = form.get('auth_method', '') if (user_modify.status == 'applying' and form.get('status', '') == 'normal'): send_activated_email(user_modify.e_mail, user_modify.username) user_modify.status = form.get('status', '') #if (form.get('password', '') != ''): #new_password = form.get('password','') #new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest() #user_modify.password = new_password #self.chpassword(cur_user = user_modify, password = form.get('password','no_password')) #modify password in another function now db.session.commit() res = self.groupQuery(name=user_modify.user_group) if res['success']: self.set_nfs_quota(user_modify.username,res['data']['data']) return {"success":'true'} #except: #return {"success":'false', "reason":"Something happened"} @token_required def chpassword(*args, **kwargs): ''' Usage: chpassword(cur_user = token_from_auth, password = 'your_password') ''' cur_user = kwargs['cur_user'] cur_user.password = hashlib.sha512(kwargs['password'].encode('utf-8')).hexdigest() def newuser(*args, 
**kwargs): ''' Usage : newuser() The only method to create a new user call this method first, modify the return value which is a database row instance,then call self.register() ''' user_new = User('newuser', 'asdf1234') quotafile = open(fspath+"/global/sys/quotainfo",'r') quotas = json.loads(quotafile.read()) quotafile.close() user_new.user_group = quotas['default'] user_new.avatar = 'default.png' return user_new def register(self, *args, **kwargs): ''' Usage: register(user = modified_from_newuser()) ''' if (kwargs['user'].username == None or kwargs['user'].username == ''): return {"success":'false', "reason": "Empty username"} user_check = User.query.filter_by(username = kwargs['user'].username).first() if (user_check != None and user_check.status != "init"): #for the activating form return {"success":'false', "reason": "Unauthorized action"} newuser = kwargs['user'] if (user_check != None and (user_check.status == "init")): db.session.delete(user_check) db.session.commit() else: newuser.password = hashlib.sha512(newuser.password.encode('utf-8')).hexdigest() db.session.add(newuser) db.session.commit() # if newuser status is normal, init some data for this user # now initialize for all kind of users #if newuser.status == 'normal': path = env.getenv('DOCKLET_LIB') subprocess.call([path+"/master/userinit.sh", newuser.username]) res = self.groupQuery(name=newuser.user_group) if res['success']: self.set_nfs_quota(newuser.username,res['data']['data']) return {"success":'true'} @administration_required def quotaadd(*args, **kwargs): form = kwargs.get('form') quotaname = form.get("quotaname") default_value = form.get("default_value") hint = form.get("hint") if (quotaname == None): return { "success":'false', "reason": "Empty quota name"} if (default_value == None): default_value = "--" groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() for group in groups: group['quotas'][quotaname] = default_value groupfile = 
open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() quotafile = open(fspath+"/global/sys/quotainfo",'r') quotas = json.loads(quotafile.read()) quotafile.close() quotas['quotainfo'].append({'name':quotaname, 'hint':hint}) quotafile = open(fspath+"/global/sys/quotainfo",'w') quotafile.write(json.dumps(quotas)) quotafile.close() return {"success":'true'} @administration_required def groupadd(*args, **kwargs): form = kwargs.get('form') groupname = form.get("groupname") if (groupname == None): return {"success":'false', "reason": "Empty group name"} groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() group = { 'name': groupname, 'quotas': {} } for key in form.keys(): if key == "groupname" or key == "token": pass else: if key == "vnode": vnode = int(form['vnode']) val = str(2**(round(math.log(vnode+3, 2))) - 3 ) group['quotas'][key] = val else: group['quotas'][key] = form.get(key) groups.append(group) groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() return {"success":'true'} @administration_required def groupdel(*args, **kwargs): name = kwargs.get('name', None) if (name == None): return {"success":'false', "reason": "Empty group name"} groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() for group in groups: if group['name'] == name: groups.remove(group) break groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() return {"success":'true'} @administration_required def lxcsettingList(*args, **kwargs): lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'r') lxcsetting = json.loads(lxcsettingfile.read()) lxcsettingfile.close() return {"success": 'true', 'data':lxcsetting} @administration_required def chlxcsetting(*args, **kwargs): form = kwargs['form'] lxcsetting = {} lxcsetting['cpu'] = form['lxcCpu'] lxcsetting['memory'] = 
form['lxcMemory'] lxcsetting['disk'] = form['lxcDisk'] lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'w') lxcsettingfile.write(json.dumps(lxcsetting)) lxcsettingfile.close() return {"success": 'true'} def queryForDisplay(*args, **kwargs): ''' Usage: queryForDisplay(user = token_from_auth) Provide information about one user that administrators need to use ''' if ( 'user' not in kwargs): return {"success":'false', "reason":"Cannot get 'user'"} user = kwargs['user'] if (user == None): return {"success":'false', "reason":"User does not exist"} result = { "success": 'true', "data":{ "username" : user.username, "password" : user.password, "avatar" : user.avatar, "nickname" : user.nickname, "description" : user.description, "status" : user.status, "e_mail" : user.e_mail, "student_number": user.student_number, "department" : user.department, "truename" : user.truename, "tel" : user.tel, "register_date" : "%s"%(user.register_date), "group" : user.user_group, "auth_method": user.auth_method, } } return result # def usermodify(rowID, columnID, newValue, cur_user): # '''not used now''' # user = um.query(ID = request.form["rowID"], cur_user = root).get('token', None) # result = um.modify(user = user, columnID = request.form["columnID"], newValue = request.form["newValue"], cur_user = root) # return json.dumps(result) ================================================ FILE: src/master/userinit.sh ================================================ #!/bin/bash # initialize for a new user # initialize directory : clusters, data, ssh # generate ssh keys for new user [ -z $FS_PREFIX ] && FS_PREFIX="/opt/docklet" USERNAME=$1 [ -z $USERNAME ] && echo "[userinit.sh] USERNAME is needed" && exit 1 echo "[Info] [userinit.sh] initialize for user $USERNAME" USER_DIR=$FS_PREFIX/global/users/$USERNAME [ -d $USER_DIR ] && echo "[userinit.sh] user directory already exists" && exit 0 mkdir -p $USER_DIR/{clusters,hosts,data,ssh} SSH_DIR=$USER_DIR/ssh # here generate id_rsa.pub has 
"user@hostname" at the end # maybe it should be delete ssh-keygen -t rsa -P '' -f $SSH_DIR/id_rsa &>/dev/null cp $SSH_DIR/id_rsa.pub $SSH_DIR/authorized_keys cat << EOF > $SSH_DIR/config Host * StrictHostKeyChecking no UserKnownHostsFile=/dev/null EOF ================================================ FILE: src/master/vclustermgr.py ================================================ #!/usr/bin/python3 import os, random, json, sys import datetime, math, time from utils.log import logger from utils import env, imagemgr, proxytool import requests, threading, traceback from utils.nettools import portcontrol from utils.model import db, Container, PortMapping, VCluster from queue import Queue userpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT')) def post_to_user(url = '/', data={}): return requests.post(userpoint+url,data=data).json() ################################################## # VclusterMgr # Description : VclusterMgr start/stop/manage virtual clusters # ################################################## def db_commit(): try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) return False return True class VclusterMgr(object): def __init__(self, nodemgr, networkmgr, etcdclient, addr, mode, distributedgw='False'): self.mode = mode self.distributedgw = distributedgw self.nodemgr = nodemgr self.imgmgr = imagemgr.ImageMgr() self.networkmgr = networkmgr self.addr = addr self.etcd = etcdclient self.defaultsize = env.getenv("CLUSTER_SIZE") self.fspath = env.getenv("FS_PREFIX") self.clusterid_locks = threading.Lock() # check database try: Container.query.all() PortMapping.query.all() VCluster.query.all() except: # create database db.create_all() logger.info ("vcluster start on %s" % (self.addr)) if self.mode == 'new': logger.info ("starting in new mode on %s" % (self.addr)) # check if all clusters data are deleted in httprest.py clean = True usersdir = self.fspath+"/global/users/" vclusters = 
VCluster.query.all() if len(vclusters) != 0: clean = False for user in os.listdir(usersdir): if len(os.listdir(usersdir+user+"/hosts")) > 0: clean = False if not clean: logger.error ("clusters files not clean, start failed") sys.exit(1) elif self.mode == "recovery": logger.info ("starting in recovery mode on %s" % (self.addr)) self.recover_allclusters() else: logger.error ("not supported mode:%s" % self.mode) sys.exit(1) # start a thread to watch recovering node to recover clusters threading.Thread(target = self._watchrecovering, args=()).start() def _watchrecovering(self): logger.info("watching recovering node to recover clusters...") while True: node = self.nodemgr.recover_queue.get() self.recover_cluster_on(node) self.nodemgr.recover_queue.task_done() def recover_allclusters(self): logger.info("try to recover all clusters for all users...") vcs = VCluster.query.filter(VCluster.status.in_(["running","error"])).all() need_recovers = [] for vc in vcs: need_recovers.append([vc.clustername, vc.ownername]) self.recover_clusters(need_recovers) def mount_allclusters(self): logger.info("mounting all vclusters for all users...") usersdir = self.fspath+"/global/users/" for user in os.listdir(usersdir): for cluster in self.list_clusters(user)[1]: logger.info ("mounting cluster:%s for user:%s ..." % (cluster, user)) self.mount_cluster(cluster, user) logger.info("mounted all vclusters for all users") def stop_allclusters(self): logger.info("stopping all vclusters for all users...") usersdir = self.fspath+"/global/users/" for user in os.listdir(usersdir): for cluster in self.list_clusters(user)[1]: logger.info ("stopping cluster:%s for user:%s ..." 
% (cluster, user)) self.stop_cluster(cluster, user) logger.info("stopped all vclusters for all users") def detach_allclusters(self): logger.info("detaching all vclusters for all users...") usersdir = self.fspath+"/global/users/" for user in os.listdir(usersdir): for cluster in self.list_clusters(user)[1]: logger.info ("detaching cluster:%s for user:%s ..." % (cluster, user)) self.detach_cluster(cluster, user) logger.info("detached all vclusters for all users") def create_cluster(self, clustername, username, image, user_info, setting): if self.is_cluster(clustername, username): return [False, "cluster:%s already exists" % clustername] if self.imgmgr.get_image_size(image) + 100 > int(setting["disk"]): return [False, "the size of disk is not big enough for the image"] clustersize = int(self.defaultsize) logger.info ("starting cluster %s with %d containers for %s" % (clustername, int(clustersize), username)) workers = self.nodemgr.get_base_nodeips() image_json = json.dumps(image) groupname = json.loads(user_info)["data"]["group"] groupquota = json.loads(user_info)["data"]["groupinfo"] uid = json.loads(user_info)["data"]["id"] if (len(workers) == 0): logger.warning ("no workers to start containers, start cluster failed") return [False, "no workers are running"] # check user IP pool status, should be moved to user init later if not self.networkmgr.has_user(username): ipnum = int(groupquota["vnode"]) + 3 cidr = 32 - math.ceil(math.log(ipnum,2)) self.networkmgr.add_user(username, cidr=cidr, isshared = True if str(groupname) == "fundation" else False) if self.distributedgw == "False": [success,message] = self.networkmgr.setup_usrgw(groupquota['input_rate_limit'], groupquota['output_rate_limit'], username, uid, self.nodemgr) if not success: return [False, message] elif not self.networkmgr.has_usrgw(username): self.networkmgr.usrgws[username] = self.networkmgr.masterip self.networkmgr.dump_usrgw(username) [status, result] = self.networkmgr.acquire_userips_cidr(username, 
clustersize) gateway = self.networkmgr.get_usergw(username) #vlanid = self.networkmgr.get_uservlanid(username) logger.info ("create cluster with gateway : %s" % gateway) self.networkmgr.printpools() if not status: logger.info ("create cluster failed: %s" % result) return [False, result] ips = result clusterid = self._acquire_id() clusterpath = self.fspath+"/global/users/"+username+"/clusters/"+clustername hostpath = self.fspath+"/global/users/"+username+"/hosts/"+str(clusterid)+".hosts" hosts = "127.0.0.1\tlocalhost\n" proxy_server_ip = "" proxy_public_ip = "" containers = [] for i in range(0, clustersize): workerip = workers[random.randint(0, len(workers)-1)] oneworker = self.nodemgr.ip_to_rpc(workerip) if self.distributedgw == "True" and i == 0 and not self.networkmgr.has_usrgw(username): [success,message] = self.networkmgr.setup_usrgw(groupquota['input_rate_limit'], groupquota['output_rate_limit'], username, uid, self.nodemgr, workerip) if not success: return [False, message] if i == 0: self.networkmgr.load_usrgw(username) proxy_server_ip = self.networkmgr.usrgws[username] [status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+proxy_server_ip) if not status: logger.error("Fail to get proxy_public_ip %s."%(proxy_server_ip)) return [False, "Fail to get proxy server public IP."] lxc_name = username + "-" + str(clusterid) + "-" + str(i) hostname = "host-"+str(i) logger.info ("create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s" % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json)) [success,message] = oneworker.create_container(lxc_name, proxy_public_ip, username, uid, json.dumps(setting) , clustername, str(clusterid), str(i), hostname, ips[i], gateway, image_json) if success is False: self.networkmgr.release_userips(username, ips[i]) logger.info("container create failed, so vcluster create failed") return [False, message] logger.info("container create 
success") hosts = hosts + ips[i].split("/")[0] + "\t" + hostname + "\t" + hostname + "."+clustername + "\n" containers.append(Container(lxc_name,hostname,ips[i],workerip,image['name'],datetime.datetime.now(),setting)) #containers.append({ 'containername':lxc_name, 'hostname':hostname, 'ip':ips[i], 'host':workerip, 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting }) hostfile = open(hostpath, 'w') hostfile.write(hosts) hostfile.close() #clusterfile = open(clusterpath, 'w') vcluster = VCluster(clusterid,clustername,username,'stopped',clustersize,clustersize,proxy_server_ip,proxy_public_ip) for con in containers: vcluster.containers.append(con) db.session.add(vcluster) db_commit() #proxy_url = env.getenv("PORTAL_URL") +"/"+ proxy_public_ip +"/_web/" + username + "/" + clustername #info = {'clusterid':clusterid, 'status':'stopped', 'size':clustersize, 'containers':containers, 'nextcid': clustersize, 'create_time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'start_time':"------"} #info['proxy_url'] = proxy_url #info['proxy_server_ip'] = proxy_server_ip #info['proxy_public_ip'] = proxy_public_ip #info['port_mapping'] = [] #clusterfile.write(json.dumps(info)) #clusterfile.close() return [True, str(vcluster)] def scale_out_cluster(self,clustername,username, image,user_info, setting): if not self.is_cluster(clustername,username): return [False, "cluster:%s not found" % clustername] if self.imgmgr.get_image_size(image) + 100 > int(setting["disk"]): return [False, "the size of disk is not big enough for the image"] workers = self.nodemgr.get_base_nodeips() if (len(workers) == 0): logger.warning("no workers to start containers, scale out failed") return [False, "no workers are running"] image_json = json.dumps(image) [status, result] = self.networkmgr.acquire_userips_cidr(username) gateway = self.networkmgr.get_usergw(username) #vlanid = self.networkmgr.get_uservlanid(username) self.networkmgr.printpools() if 
not status: return [False, result] ip = result[0] [status, clusterinfo] = self.get_clusterinfo(clustername,username) clusterid = clusterinfo['clusterid'] clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts" cid = clusterinfo['nextcid'] workerip = workers[random.randint(0, len(workers)-1)] oneworker = self.nodemgr.ip_to_rpc(workerip) lxc_name = username + "-" + str(clusterid) + "-" + str(cid) hostname = "host-" + str(cid) proxy_server_ip = clusterinfo['proxy_server_ip'] proxy_public_ip = clusterinfo['proxy_public_ip'] uid = json.loads(user_info)["data"]["id"] [success, message] = oneworker.create_container(lxc_name, proxy_public_ip, username, uid, json.dumps(setting), clustername, clusterid, str(cid), hostname, ip, gateway, image_json) if success is False: self.networkmgr.release_userips(username, ip) logger.info("create container failed, so scale out failed") return [False, message] if clusterinfo['status'] == "running": self.networkmgr.check_usergre(username, uid, workerip, self.nodemgr, self.distributedgw=='True') oneworker.start_container(lxc_name) oneworker.start_services(lxc_name, ["ssh"]) # TODO: need fix namesplit = lxc_name.split('-') portname = namesplit[1] + '-' + namesplit[2] oneworker.recover_usernet(portname, uid, proxy_server_ip, workerip==proxy_server_ip) logger.info("scale out success") hostfile = open(hostpath, 'a') hostfile.write(ip.split("/")[0] + "\t" + hostname + "\t" + hostname + "." 
+ clustername + "\n") hostfile.close() [success,vcluster] = self.get_vcluster(clustername,username) if not success: return [False, "Fail to write info."] vcluster.nextcid = int(clusterinfo['nextcid']) + 1 vcluster.size = int(clusterinfo['size']) + 1 vcluster.containers.append(Container(lxc_name,hostname,ip,workerip,image['name'],datetime.datetime.now(),setting)) #{'containername':lxc_name, 'hostname':hostname, 'ip':ip, 'host':workerip, 'image':image['name'], 'lastsave':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'setting': setting}) db.session.add(vcluster) db_commit() return [True, clusterinfo] def addproxy(self,username,clustername,ip,port): [status, clusterinfo] = self.get_clusterinfo(clustername, username) if 'proxy_ip' in clusterinfo: return [False, "proxy already exists"] target = "http://" + ip + ":" + port + "/" clusterinfo['proxy_ip'] = ip + ":" + port if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip']) worker.set_route("/"+ clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername, target) else: proxytool.set_route("/" + clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername, target) clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w') clusterfile.write(json.dumps(clusterinfo)) clusterfile.close() return [True, clusterinfo] def deleteproxy(self, username, clustername): [status, clusterinfo] = self.get_clusterinfo(clustername, username) if 'proxy_ip' not in clusterinfo: return [True, clusterinfo] clusterinfo.pop('proxy_ip') if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip']) worker.delete_route("/" + clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername) else: proxytool.delete_route("/" + clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername) clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w') 
clusterfile.write(json.dumps(clusterinfo)) clusterfile.close() return [True, clusterinfo] def count_port_mapping(self, username): return sum([len(self.get_clusterinfo(cluster, username)[1]['port_mapping']) for cluster in self.list_clusters(username)[1]]) def add_port_mapping(self,username,clustername,node_name,node_ip,port,quota): port_mapping_count = self.count_port_mapping(username) if port_mapping_count >= int(quota['portmapping']): return [False, 'Port mapping quota exceed.'] [status, clusterinfo] = self.get_clusterinfo(clustername, username) if clusterinfo['status'] == 'stopped': return [False, 'Please start the clusters first.'] host_port = 0 if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip']) [success, host_port] = worker.acquire_port_mapping(node_name, node_ip, port) else: [success, host_port] = portcontrol.acquire_port_mapping(node_name, node_ip, port) if not success: return [False, host_port] [status,vcluster] = self.get_vcluster(clustername,username) if not status: return [False,"VCluster not found."] vcluster.port_mapping.append(PortMapping(node_name,node_ip,port,host_port)) db.session.add(vcluster) db_commit() return [True, json.loads(str(vcluster))] def recover_port_mapping(self,username,clustername): [status, clusterinfo] = self.get_clusterinfo(clustername, username) for rec in clusterinfo['port_mapping']: if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip']) [success, host_port] = worker.acquire_port_mapping(rec['node_name'], rec['node_ip'], rec['node_port'], rec['host_port']) else: [success, host_port] = portcontrol.acquire_port_mapping(rec['node_name'], rec['node_ip'], rec['node_port'], rec['host_port']) if not success: return [False, host_port] return [True, clusterinfo] def delete_all_port_mapping(self, username, clustername, node_name): [status, vcluster] = self.get_vcluster(clustername, username) if not status: return [False,"VCluster not found."] error_msg 
= None delete_list = [] for item in vcluster.port_mapping: if item.node_name == node_name: node_ip = item.node_ip node_port = item.node_port if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(vcluster.proxy_server_ip) [success,msg] = worker.release_port_mapping(node_name, node_ip, str(node_port)) else: [success,msg] = portcontrol.release_port_mapping(node_name, node_ip, str(node_port)) if not success: error_msg = msg else: delete_list.append(item) if len(delete_list) > 0: for item in delete_list: db.session.delete(item) db_commit() else: return [True,"No port mapping."] if error_msg is not None: return [False,error_msg] else: return [True,"Success"] def delete_port_mapping(self, username, clustername, node_name, node_port): [status, vcluster] = self.get_vcluster(clustername, username) if not status: return [False,"VCluster not found."] for item in vcluster.port_mapping: if item.node_name == node_name and str(item.node_port) == str(node_port): node_ip = item.node_ip node_port = item.node_port if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(vcluster.proxy_server_ip) [success,msg] = worker.release_port_mapping(node_name, node_ip, str(node_port)) else: [success,msg] = portcontrol.release_port_mapping(node_name, node_ip, str(node_port)) db.session.delete(item) db_commit() if success: return [True, json.loads(str(vcluster))] else: return [False, msg] return [False, "No port mapping."] def flush_cluster(self,username,clustername,containername): begintime = datetime.datetime.now() [status, info] = self.get_clusterinfo(clustername, username) if not status: return [False, "cluster not found"] containers = info['containers'] imagetmp = username + "_tmp_docklet" for container in containers: if container['containername'] == containername: logger.info("container: %s found" % containername) worker = self.nodemgr.ip_to_rpc(container['host']) worker.create_image(username,imagetmp,containername) fimage = container['image'] logger.info("image: %s 
created" % imagetmp) break else: logger.error("container: %s not found" % containername) for container in containers: if container['containername'] != containername: logger.info("container: %s now flush" % container['containername']) worker = self.nodemgr.ip_to_rpc(container['host']) #t = threading.Thread(target=onework.flush_container,args=(username,imagetmp,container['containername'])) #threads.append(t) worker.flush_container(username,imagetmp,container['containername']) container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") container['image'] = fimage logger.info("thread for container: %s has been prepared" % container['containername']) clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername infofile = open(clusterpath,'w') infofile.write(json.dumps(info)) infofile.close() self.imgmgr.removeImage(username,imagetmp) endtime = datetime.datetime.now() dtime = (endtime - begintime).seconds logger.info("flush spend %s seconds" % dtime) logger.info("flush success") def image_check(self,username,imagename): imagepath = self.fspath + "/global/images/private/" + username + "/" if os.path.exists(imagepath + imagename): return [False, "image already exists"] else: return [True, "image not exists"] def create_image(self,username,clustername,containername,imagename,description,imagenum=10): [status, vcluster] = self.get_vcluster(clustername,username) if not status: return [False, "cluster not found"] containers = vcluster.containers for container in containers: if container.containername == containername: logger.info("container: %s found" % containername) worker = self.nodemgr.ip_to_rpc(container.host) if worker is None: return [False, "The worker %s can't be found or has been stopped." 
% container.host] res = worker.create_image(username,imagename,containername,description,imagenum) container.lastsave = datetime.datetime.now() container.image = imagename break else: res = [False, "container not found"] logger.error("container: %s not found" % containername) db_commit() return res def delete_cluster(self, clustername, username, user_info): [status, vcluster] = self.get_vcluster(clustername, username) if not status: return [False, "cluster not found"] if vcluster.status =='running': return [False, "cluster is still running, you need to stop it and then delete"] ips = [] for container in vcluster.containers: worker = self.nodemgr.ip_to_rpc(container.host) if worker is None: return [False, "The worker %s can't be found or has been stopped." % container.host] worker.delete_container(container.containername) db.session.delete(container) ips.append(container.ip) logger.info("delete vcluster and release vcluster ips") self.networkmgr.release_userips(username, ips) self.networkmgr.printpools() #os.remove(self.fspath+"/global/users/"+username+"/clusters/"+clustername) for bh in vcluster.billing_history: db.session.delete(bh) db.session.delete(vcluster) db_commit() os.remove(self.fspath+"/global/users/"+username+"/hosts/"+str(vcluster.clusterid)+".hosts") groupname = json.loads(user_info)["data"]["group"] uid = json.loads(user_info)["data"]["id"] [status, clusters] = self.list_clusters(username) if len(clusters) == 0: self.networkmgr.del_user(username) self.networkmgr.del_usrgwbr(username, uid, self.nodemgr) #logger.info("vlanid release triggered") return [True, "cluster delete"] def scale_in_cluster(self, clustername, username, containername): [status, vcluster] = self.get_vcluster(clustername, username) if not status: return [False, "cluster not found"] new_containers = [] for container in vcluster.containers: if container.containername == containername: worker = self.nodemgr.ip_to_rpc(container.host) if worker is None: return [False, "The worker %s can't 
be found or has been stopped." % container.host] worker.delete_container(containername) db.session.delete(container) self.networkmgr.release_userips(username, container.ip) self.networkmgr.printpools() vcluster.size -= 1 cid = containername[containername.rindex("-")+1:] clusterid = vcluster.clusterid hostpath = self.fspath + "/global/users/" + username + "/hosts/" + str(clusterid) + ".hosts" db_commit() hostfile = open(hostpath, 'r') hostinfo = hostfile.readlines() hostfile.close() hostfile = open(hostpath, 'w') new_hostinfo = [] new_hostinfo.append(hostinfo[0]) for host in hostinfo[1:]: parts = host.split("\t") if parts[1][parts[1].rindex("-")+1:] == cid: pass else: new_hostinfo.append(host) hostfile.writelines(new_hostinfo) hostfile.close() [success, msg] = self.delete_all_port_mapping(username, clustername, containername) if not success: return [False, msg] [status, info] = self.get_clusterinfo(clustername, username) return [True, info] def get_clustersetting(self, clustername, username, containername, allcontainer): [status,vcluster] = self.get_vcluster(clustername,username) if vcluster is None: logger.error("cluster file: %s not found" % clustername) return [False, "cluster file not found"] cpu = 0 memory = 0 disk = 0 if allcontainer: for container in vcluster.containers: cpu += int(container.setting_cpu) memory += int(container.setting_mem) disk += int(container.setting_disk) else: for container in vcluster.containers: if container.containername == containername: cpu += int(container.setting_cpu) memory += int(container.setting_mem) disk += int(container.setting_disk) return [True, {'cpu':cpu, 'memory':memory, 'disk':disk}] def update_cluster_baseurl(self, clustername, username, oldip, newip): [status, info] = self.get_clusterinfo(clustername, username) if not status: return [False, "cluster not found"] logger.info("%s %s:base_url need to be modified(%s %s)."%(username,clustername,oldip,newip)) for container in info['containers']: worker = 
self.nodemgr.ip_to_rpc(container['host']) #if worker is None: # return [False, "The worker can't be found or has been stopped."] self.nodemgr.call_rpc_function(worker,'update_baseurl',[container['containername'],oldip,newip]) self.nodemgr.call_rpc_function(worker,'stop_container',[container['containername']]) def check_public_ip(self, clustername, username): [status, info] = self.get_clusterinfo(clustername, username) [status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+info['proxy_server_ip']) if not info['proxy_public_ip'] == proxy_public_ip: logger.info("%s %s proxy_public_ip has been changed, base_url need to be modified."%(username,clustername)) oldpublicIP= info['proxy_public_ip'] self.update_proxy_ipAndurl(clustername,username,info['proxy_server_ip']) self.update_cluster_baseurl(clustername,username,oldpublicIP,proxy_public_ip) return False else: return True def start_cluster(self, clustername, username, user_info): uid = user_info['data']['id'] input_rate_limit = user_info['data']['groupinfo']['input_rate_limit'] output_rate_limit = user_info['data']['groupinfo']['output_rate_limit'] [status, info] = self.get_clusterinfo(clustername, username) if not status: return [False, "cluster not found"] if info['status'] == 'running': return [False, "cluster is already running"] # set proxy try: target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000" if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip']) # check public ip if not self.check_public_ip(clustername,username): [status, info] = self.get_clusterinfo(clustername, username) worker.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target) else: if not info['proxy_server_ip'] == self.addr: logger.info("%s %s proxy_server_ip has been changed, base_url need to be modified."%(username,clustername)) oldpublicIP= info['proxy_public_ip'] self.update_proxy_ipAndurl(clustername,username,self.addr) [status, info] = 
self.get_clusterinfo(clustername, username) self.update_cluster_baseurl(clustername,username,oldpublicIP,info['proxy_public_ip']) # check public ip if not self.check_public_ip(clustername,username): [status, info] = self.get_clusterinfo(clustername, username) proxytool.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target) except: logger.info(traceback.format_exc()) return [False, "start cluster failed with setting proxy failed"] # check gateway for user # after reboot, user gateway goes down and lose its configuration # so, check is necessary self.networkmgr.check_usergw(input_rate_limit, output_rate_limit, username, uid, self.nodemgr,self.distributedgw=='True') # start containers for container in info['containers']: # set up gre from user's gateway host to container's host. self.networkmgr.check_usergre(username, uid, container['host'], self.nodemgr, self.distributedgw=='True') worker = self.nodemgr.ip_to_rpc(container['host']) if worker is None: return [False, "The worker %s can't be found or has been stopped." % container['host']] worker.start_container(container['containername']) worker.start_services(container['containername']) namesplit = container['containername'].split('-') portname = namesplit[1] + '-' + namesplit[2] worker.recover_usernet(portname, uid, info['proxy_server_ip'], container['host']==info['proxy_server_ip']) [status,vcluster] = self.get_vcluster(clustername,username) vcluster.status ='running' vcluster.start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") vcluster.is_warned = False if not db_commit(): return [False, "Commit Errror"] return [True, "start cluster"] def mount_cluster(self, clustername, username): [status, info] = self.get_clusterinfo(clustername, username) if not status: return [False, "cluster not found"] for container in info['containers']: worker = self.nodemgr.ip_to_rpc(container['host']) if worker is None: return [False, "The worker %s can't be found or has been stopped." 
% container['host']] worker.mount_container(container['containername']) return [True, "mount cluster"] def recover_cluster_on(self, host): logger.info("start to recover clusters which has containers on host %s" % host) vcs = VCluster.query.filter(VCluster.status.in_(["running","error"])).all() need_recovers = [] for vc in vcs: if len(vc.containers.filter_by(host=host).all()) > 0: need_recovers.append([vc.clustername, vc.ownername]) self.recover_clusters(need_recovers) def recover_clusters(self, clusters_users): logger.info("start to recover clusters:"+ str(clusters_users)) clusters = [d[0] for d in clusters_users] users = set([d[1] for d in clusters_users]) auth_key = env.getenv('AUTH_KEY') res = post_to_user("/master/user/groupinfo/", {'auth_key':auth_key}) #logger.info(res) groups = json.loads(res['groups']) quotas = {} for group in groups: #logger.info(group) quotas[group['name']] = group['quotas'] recover_data = {} recover_queue = Queue(maxsize=0) for user in users: recover_info = post_to_user("/master/user/recoverinfo/", {'username':user,'auth_key':auth_key}) recover_data[user] = {} recover_data[user]['uid'] = recover_info['uid'] recover_data[user]['groupname'] = recover_info['groupname'] for d in clusters_users: recover_queue.put({"user": d[1], "vcluster":d[0], "times":0}) now_times = 0 sleep_time = 10 max_times = 1 while not recover_queue.empty(): q = recover_queue.get() if q['times'] >= max_times: logger.error("recovering retry more than %d times, return" % max_times) return if q['times'] > now_times: logger.error("recovering retry %d times, sleep %d seconds" % (q['times'], sleep_time)) time.sleep(sleep_time) sleep_time = 2 * sleep_time now_times = q['times'] user = q['user'] cluster = q['vcluster'] logger.info ("recovering cluster:%s for user:%s %d times..." 
% (cluster, user, q['times'])) uid = recover_data[user]['uid'] groupname = recover_data[user]['groupname'] input_rate_limit = quotas[groupname]['input_rate_limit'] output_rate_limit = quotas[groupname]['output_rate_limit'] success1, msg = self.recover_cluster(cluster, user, uid, input_rate_limit, output_rate_limit) success2, cludb = self.get_vcluster(cluster, user) if not success2: logger.error("cannot find vcluster %s" % cluster) continue if not success1: logger.error(msg) q["times"] += 1 recover_queue.put(q) cludb.status = "error" else: cludb.status = "running" db_commit() def recover_cluster(self, clustername, username, uid, input_rate_limit, output_rate_limit): [status, info] = self.get_clusterinfo(clustername, username) if not status: return [False, "cluster not found"] if info['status'] == 'stopped': return [True, "cluster no need to start"] # recover proxy of cluster try: target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000" if self.distributedgw == 'True': worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip']) # check public ip if not self.check_public_ip(clustername,username): [status, info] = self.get_clusterinfo(clustername, username) try: self.nodemgr.call_rpc_function(worker,'set_route',["/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target]) except Exception as err: logger.error(traceback.format_exc()) return [False, "fail to set proxy on node %s" % info['proxy_server_ip']] else: if not info['proxy_server_ip'] == self.addr: logger.info("%s %s proxy_server_ip has been changed, base_url need to be modified."%(username,clustername)) oldpublicIP= info['proxy_public_ip'] self.update_proxy_ipAndurl(clustername,username,self.addr) [status, info] = self.get_clusterinfo(clustername, username) self.update_cluster_baseurl(clustername,username,oldpublicIP,info['proxy_public_ip']) # check public ip if not self.check_public_ip(clustername,username): [status, info] = self.get_clusterinfo(clustername, username) 
proxytool.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target)
        except:
            return [False, "start cluster failed with setting proxy failed"]
        # need to check and recover gateway of this user
        self.networkmgr.check_usergw(input_rate_limit, output_rate_limit, username, uid, self.nodemgr,self.distributedgw=='True')
        # recover containers of this cluster
        for container in info['containers']:
            # set up gre from user's gateway host to container's host.
            self.networkmgr.check_usergre(username, uid, container['host'], self.nodemgr, self.distributedgw=='True')
            worker = self.nodemgr.ip_to_rpc(container['host'])
            if worker is None:
                return [False, "The worker %s can't be found or has been stopped." % container['host']]
            try:
                self.nodemgr.call_rpc_function(worker,'recover_container',[container['containername']])
                # container names look like "<user>-<clusterid>-<cid>";
                # the user network port name is "<clusterid>-<cid>".
                namesplit = container['containername'].split('-')
                portname = namesplit[1] + '-' + namesplit[2]
                self.nodemgr.call_rpc_function(worker,'recover_usernet',[portname, uid, info['proxy_server_ip'], container['host']==info['proxy_server_ip']])
            except Exception as err:
                logger.error(traceback.format_exc())
                return [False, "fail to recover container or usernet on node %s" % container['host']]
        # recover ports mapping
        [success, msg] = self.recover_port_mapping(username,clustername)
        if not success:
            return [False, msg]
        return [True, "start cluster"]

    # maybe here should use cluster id
    def stop_cluster(self, clustername, username):
        """Stop a running cluster.

        Removes the cluster's proxy route (via the gateway worker when the
        gateway is distributed, otherwise locally), releases every container's
        port mappings, stops each container, and marks the VCluster record
        as 'stopped' in the database.

        Returns [True, msg] on success, [False, msg] on failure.
        """
        [status, info] = self.get_clusterinfo(clustername, username)
        if not status:
            return [False, "cluster not found"]
        if info['status'] == 'stopped':
            return [False, 'cluster is already stopped']
        if self.distributedgw == 'True':
            worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
            worker.delete_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername)
        else:
            proxytool.delete_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername)
        for container in info['containers']:
self.delete_all_port_mapping(username,clustername,container['containername'])
            worker = self.nodemgr.ip_to_rpc(container['host'])
            if worker is None:
                return [False, "The worker %s can't be found or has been stopped." % container['host']]
            worker.stop_container(container['containername'])
        [status, vcluster] = self.get_vcluster(clustername, username)
        vcluster.status = 'stopped'
        vcluster.start_time ="------"
        vcluster.stop_time = datetime.datetime.now()
        if not db_commit():
            return [False, "Commit Errror"]
        return [True, "stop cluster"]

    def detach_cluster(self, clustername, username):
        """Detach every container of a stopped cluster on its worker node.

        Refuses to act on a running cluster. Returns [True, msg] / [False, msg].
        """
        [status, info] = self.get_clusterinfo(clustername, username)
        if not status:
            return [False, "cluster not found"]
        if info['status'] == 'running':
            return [False, 'cluster is running, please stop it first']
        for container in info['containers']:
            worker = self.nodemgr.ip_to_rpc(container['host'])
            if worker is None:
                return [False, "The worker %s can't be found or has been stopped." % container['host']]
            worker.detach_container(container['containername'])
        return [True, "detach cluster"]

    def list_clusters(self, user):
        """Return [True, names] of all clusters owned by *user*."""
        clusters = VCluster.query.filter_by(ownername = user).all()
        clusters = [clu.clustername for clu in clusters]
        '''full_clusters = [] for cluster in clusters: single_cluster = {} single_cluster['name'] = cluster [status, info] = self.get_clusterinfo(cluster,user) if info['status'] == 'running': single_cluster['status'] = 'running' else: single_cluster['status'] = 'stopping' full_clusters.append(single_cluster)'''
        return [True, clusters]

    def migrate_container(self, clustername, username, containername, new_host, user_info):
        """Move one container of a *stopped* cluster to *new_host*.

        Strategy: save the container as a temporary image on the old host,
        re-create the container from that image on the new host, commit the
        new host to the database, then delete the old container and the
        temporary image. Returns [True, ""] / [False, msg].
        """
        [status, info] = self.get_clusterinfo(clustername, username)
        if not status:
            return [False, "cluster not found"]
        if info['status'] != 'stopped':
            return [False, 'cluster is not stopped']
        con_db = Container.query.get(containername)
        if con_db is None:
            return [False, 'Container not found']
        if con_db.host == new_host:
            return [False, 'Container has been on the new host']
        oldworker = self.nodemgr.ip_to_rpc(con_db.host)
        if oldworker is None:
            return [False, "Old host worker %s can't be found or has been stopped." % con_db.host]
        oldworker.stop_container(containername)
        # Temporary image name is date-stamped; removed again at the end of migration.
        imagename = "migrate-" + containername + "-" + datetime.datetime.now().strftime("%Y-%m-%d")
        logger.info("Save Image for container:%s imagename:%s host:%s"%(containername, imagename, con_db.host))
        status,msg = oldworker.create_image(username,imagename,containername,"",10000)
        if not status:
            return [False, msg]
        #con_db.lastsave = datetime.datetime.now()
        #con_db.image = imagename
        self.networkmgr.load_usrgw(username)
        proxy_server_ip = self.networkmgr.usrgws[username]
        [status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+proxy_server_ip)
        if not status:
            self.imgmgr.removeImage(username,imagename)
            logger.error("Fail to get proxy_public_ip %s."%(proxy_server_ip))
            return [False, "Fail to get proxy server public IP."]
        uid = user_info['data']['id']
        setting = { 'cpu': con_db.setting_cpu, 'memory': con_db.setting_mem, 'disk': con_db.setting_disk }
        # containername is "<user>-<clusterid>-<cid>"
        _, clusterid, cid = containername.split('-')
        hostname = "host-"+str(cid)
        gateway = self.networkmgr.get_usergw(username)
        image = {'name':imagename,'type':'private','owner':username }
        logger.info("Migrate: proxy_ip:%s uid:%s setting:%s clusterid:%s cid:%s hostname:%s gateway:%s image:%s" %(proxy_public_ip, str(uid), str(setting), clusterid, cid, hostname, gateway, str(image)))
        logger.info("Migrate: create container(%s) on new host %s"%(containername, new_host))
        worker = self.nodemgr.ip_to_rpc(new_host)
        if worker is None:
            self.imgmgr.removeImage(username,imagename)
            return [False, "New host worker %s can't be found or has been stopped."
% new_host] status,msg = worker.create_container(containername, proxy_public_ip, username, uid, json.dumps(setting), clustername, str(clusterid), str(cid), hostname, con_db.ip, gateway, json.dumps(image)) if not status: self.imgmgr.removeImage(username,imagename) return [False, msg] con_db.host = new_host try: db.session.commit() except Exception as err: logger.error(traceback.format_exc()) db.session.rollback() worker.delete_container(containername) self.imgmgr.removeImage(username,imagename) return [False, "Database commit error!"] oldworker.delete_container(containername) self.imgmgr.removeImage(username,imagename) return [True,""] def migrate_cluster(self, clustername, username, src_host, new_host_list, user_info): [status, info] = self.get_clusterinfo(clustername, username) if not status: return [False, "cluster not found"] prestatus = info['status'] self.stop_cluster(clustername, username) for container in info['containers']: if not container['host'] == src_host: continue random.shuffle(new_host_list) for new_host in new_host_list: status,msg = self.migrate_container(clustername,username,container['containername'],new_host,user_info) if status: break else: logger.error(msg) else: if prestatus == 'running': self.start_cluster(clustername, username, user_info) return [False, msg] logger.info("[Migrate] prestatus:%s for cluster(%s) user(%s)"%(prestatus, clustername, username)) if prestatus == 'running': status, msg = self.start_cluster(clustername, username, user_info) if not status: return [False, msg] return [True, ""] def migrate_host(self, src_host, new_host_list, ulockmgr): [status, vcluster_list] = self.get_all_clusterinfo() if not status: return [False, vcluster_list] auth_key = env.getenv('AUTH_KEY') res = post_to_user("/master/user/groupinfo/", {'auth_key':auth_key}) groups = json.loads(res['groups']) quotas = {} for group in groups: quotas[group['name']] = group['quotas'] for vcluster in vcluster_list: if 'ownername' not in vcluster.keys(): return 
[Flase, 'Ownername not in vcluster(%s).keys' % str(vcluster) ] try: username = vcluster['ownername'] ulockmgr.acquire(username) clustername = vcluster['clustername'] rc_info = post_to_user("/master/user/recoverinfo/", {'username':username,'auth_key':auth_key}) groupname = rc_info['groupname'] user_info = {"data":{"id":rc_info['uid'],"groupinfo":quotas[groupname]}} self.migrate_cluster(clustername, username, src_host, new_host_list, user_info) except Exception as ex: ulockmgr.release(username) logger.error(traceback.format_exc()) return [False, str(ex)] ulockmgr.release(username) return [True, ""] def is_cluster(self, clustername, username): [status, clusters] = self.list_clusters(username) if clustername in clusters: return True else: return False # get id from name def get_clusterid(self, clustername, username): [status, info] = self.get_clusterinfo(clustername, username) if not status: return -1 if 'clusterid' in info: return int(info['clusterid']) logger.error ("internal error: cluster:%s info file has no clusterid " % clustername) return -1 def update_proxy_ipAndurl(self, clustername, username, proxy_server_ip): [status, vcluster] = self.get_vcluster(clustername, username) if not status: return [False, "cluster not found"] vcluster.proxy_server_ip = proxy_server_ip [status, proxy_public_ip] = self.etcd.getkey("machines/publicIP/"+proxy_server_ip) if not status: logger.error("Fail to get proxy_public_ip %s."%(proxy_server_ip)) proxy_public_ip = proxy_server_ip vcluster.proxy_public_ip = proxy_public_ip #proxy_url = env.getenv("PORTAL_URL") +"/"+ proxy_public_ip +"/_web/" + username + "/" + clustername #info['proxy_url'] = proxy_url if not db_commit(): return [False, "Commit Errror"] return proxy_public_ip def get_clusterinfo(self, clustername, username): [success,vcluster] = self.get_vcluster(clustername,username) if vcluster is None: return [False, "cluster not found"] vcluster = json.loads(str(vcluster)) return [True, vcluster] def get_vcluster(self, 
clustername, username): vcluster = VCluster.query.filter_by(ownername=username,clustername=clustername).first() if vcluster is None: return [False, None] else: return [True, vcluster] def get_all_clusterinfo(self): vcluster_list = VCluster.query.all() #logger.info(str(vcluster_list)) if vcluster_list is None: return [False, None] else: return [True, json.loads(str(vcluster_list))] # acquire cluster id from etcd def _acquire_id(self): self.clusterid_locks.acquire() clusterid = self.etcd.getkey("vcluster/nextid")[1] self.etcd.setkey("vcluster/nextid", str(int(clusterid)+1)) self.clusterid_locks.release() return int(clusterid) ================================================ FILE: src/protos/rpc.proto ================================================ syntax = "proto3"; service Master { rpc report (ReportMsg) returns (Reply) {} } service Worker { rpc start_vnode (VNodeInfo) returns (Reply) {} rpc start_task (TaskInfo) returns (Reply) {} rpc stop_task (TaskInfo) returns (Reply) {} rpc stop_vnode (VNodeInfo) returns (Reply) {} } message VNodeInfo { string taskid = 1; string username = 2; int32 vnodeid = 3; VNode vnode = 4; // 集群配置 } message Reply { ReplyStatus status = 1; // 返回值 string message = 2; enum ReplyStatus { ACCEPTED = 0; REFUSED = 1; } } message ReportMsg { repeated TaskMsg taskmsgs = 1; } message TaskMsg { string taskid = 1; string username = 2; int32 vnodeid = 3; Status subTaskStatus = 4; // 任务状态 string token = 5; string errmsg = 6; } enum Status { WAITING = 0; RUNNING = 1; COMPLETED = 2; FAILED = 3; TIMEOUT = 4; OUTPUTERROR = 5; } message TaskInfo { string taskid = 1; string username = 2; int32 vnodeid = 3; Parameters parameters = 4; // 参数 int32 timeout = 5; // 超时阈值 string token = 6; } message Parameters { Command command = 1; // 命令配置 string stderrRedirectPath = 2; // 错误输出重定向 string stdoutRedirectPath = 3; // 标准输出重定向 } message Command { string commandLine = 1; // 命令 string packagePath = 2; // 工作路径 map envVars = 3; // 自定义环境变量 } message VNode { Image image = 1; 
// 镜像配置 Instance instance = 2; // 实例配置 repeated Mount mount = 3; // 挂载配置 Network network = 4; //网络配置 string hostname = 5; //主机名 } message Network { string ipaddr = 1; string gateway = 2; string masterip = 3; string brname = 4; } message Image { string name = 1; // 镜像名 ImageType type = 2; // 镜像类型(public/private) string owner = 3; // 所有者 enum ImageType { BASE = 0; PUBLIC = 1; PRIVATE = 2; } } message Mount { string provider = 1; string localPath = 2; // 本地路径 string remotePath = 3; // 远程路径 string accessKey = 4; string secretKey = 5; string other = 6; } message Instance { int32 cpu = 1; // CPU,单位 个? int32 memory = 2; // 内存,单位 mb int32 disk = 3; // 磁盘,单位 mb int32 gpu = 4; // 显卡,单位 个 } ================================================ FILE: src/protos/rpc_pb2.py ================================================ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: rpc.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='rpc.proto', package='', syntax='proto3', serialized_pb=_b('\n\trpc.proto\"U\n\tVNodeInfo\x12\x0e\n\x06taskid\x18\x01 \x01(\t\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0f\n\x07vnodeid\x18\x03 \x01(\x05\x12\x15\n\x05vnode\x18\x04 \x01(\x0b\x32\x06.VNode\"f\n\x05Reply\x12\"\n\x06status\x18\x01 \x01(\x0e\x32\x12.Reply.ReplyStatus\x12\x0f\n\x07message\x18\x02 \x01(\t\"(\n\x0bReplyStatus\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x00\x12\x0b\n\x07REFUSED\x10\x01\"\'\n\tReportMsg\x12\x1a\n\x08taskmsgs\x18\x01 \x03(\x0b\x32\x08.TaskMsg\"{\n\x07TaskMsg\x12\x0e\n\x06taskid\x18\x01 
\x01(\t\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0f\n\x07vnodeid\x18\x03 \x01(\x05\x12\x1e\n\rsubTaskStatus\x18\x04 \x01(\x0e\x32\x07.Status\x12\r\n\x05token\x18\x05 \x01(\t\x12\x0e\n\x06\x65rrmsg\x18\x06 \x01(\t\"~\n\x08TaskInfo\x12\x0e\n\x06taskid\x18\x01 \x01(\t\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x0f\n\x07vnodeid\x18\x03 \x01(\x05\x12\x1f\n\nparameters\x18\x04 \x01(\x0b\x32\x0b.Parameters\x12\x0f\n\x07timeout\x18\x05 \x01(\x05\x12\r\n\x05token\x18\x06 \x01(\t\"_\n\nParameters\x12\x19\n\x07\x63ommand\x18\x01 \x01(\x0b\x32\x08.Command\x12\x1a\n\x12stderrRedirectPath\x18\x02 \x01(\t\x12\x1a\n\x12stdoutRedirectPath\x18\x03 \x01(\t\"\x8b\x01\n\x07\x43ommand\x12\x13\n\x0b\x63ommandLine\x18\x01 \x01(\t\x12\x13\n\x0bpackagePath\x18\x02 \x01(\t\x12&\n\x07\x65nvVars\x18\x03 \x03(\x0b\x32\x15.Command.EnvVarsEntry\x1a.\n\x0c\x45nvVarsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x7f\n\x05VNode\x12\x15\n\x05image\x18\x01 \x01(\x0b\x32\x06.Image\x12\x1b\n\x08instance\x18\x02 \x01(\x0b\x32\t.Instance\x12\x15\n\x05mount\x18\x03 \x03(\x0b\x32\x06.Mount\x12\x19\n\x07network\x18\x04 \x01(\x0b\x32\x08.Network\x12\x10\n\x08hostname\x18\x05 \x01(\t\"L\n\x07Network\x12\x0e\n\x06ipaddr\x18\x01 \x01(\t\x12\x0f\n\x07gateway\x18\x02 \x01(\t\x12\x10\n\x08masterip\x18\x03 \x01(\t\x12\x0e\n\x06\x62rname\x18\x04 \x01(\t\"t\n\x05Image\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1e\n\x04type\x18\x02 \x01(\x0e\x32\x10.Image.ImageType\x12\r\n\x05owner\x18\x03 \x01(\t\".\n\tImageType\x12\x08\n\x04\x42\x41SE\x10\x00\x12\n\n\x06PUBLIC\x10\x01\x12\x0b\n\x07PRIVATE\x10\x02\"u\n\x05Mount\x12\x10\n\x08provider\x18\x01 \x01(\t\x12\x11\n\tlocalPath\x18\x02 \x01(\t\x12\x12\n\nremotePath\x18\x03 \x01(\t\x12\x11\n\taccessKey\x18\x04 \x01(\t\x12\x11\n\tsecretKey\x18\x05 \x01(\t\x12\r\n\x05other\x18\x06 \x01(\t\"B\n\x08Instance\x12\x0b\n\x03\x63pu\x18\x01 \x01(\x05\x12\x0e\n\x06memory\x18\x02 \x01(\x05\x12\x0c\n\x04\x64isk\x18\x03 \x01(\x05\x12\x0b\n\x03gpu\x18\x04 
\x01(\x05*[\n\x06Status\x12\x0b\n\x07WAITING\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\r\n\tCOMPLETED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\x0b\n\x07TIMEOUT\x10\x04\x12\x0f\n\x0bOUTPUTERROR\x10\x05\x32(\n\x06Master\x12\x1e\n\x06report\x12\n.ReportMsg\x1a\x06.Reply\"\x00\x32\x96\x01\n\x06Worker\x12#\n\x0bstart_vnode\x12\n.VNodeInfo\x1a\x06.Reply\"\x00\x12!\n\nstart_task\x12\t.TaskInfo\x1a\x06.Reply\"\x00\x12 \n\tstop_task\x12\t.TaskInfo\x1a\x06.Reply\"\x00\x12\"\n\nstop_vnode\x12\n.VNodeInfo\x1a\x06.Reply\"\x00\x62\x06proto3') ) _STATUS = _descriptor.EnumDescriptor( name='Status', full_name='Status', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='WAITING', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='RUNNING', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='COMPLETED', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='FAILED', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='TIMEOUT', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='OUTPUTERROR', index=5, number=5, options=None, type=None), ], containing_type=None, options=None, serialized_start=1249, serialized_end=1340, ) _sym_db.RegisterEnumDescriptor(_STATUS) Status = enum_type_wrapper.EnumTypeWrapper(_STATUS) WAITING = 0 RUNNING = 1 COMPLETED = 2 FAILED = 3 TIMEOUT = 4 OUTPUTERROR = 5 _REPLY_REPLYSTATUS = _descriptor.EnumDescriptor( name='ReplyStatus', full_name='Reply.ReplyStatus', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='ACCEPTED', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='REFUSED', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, serialized_start=162, serialized_end=202, ) _sym_db.RegisterEnumDescriptor(_REPLY_REPLYSTATUS) _IMAGE_IMAGETYPE = _descriptor.EnumDescriptor( 
name='ImageType', full_name='Image.ImageType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='BASE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='PUBLIC', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='PRIVATE', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=1014, serialized_end=1060, ) _sym_db.RegisterEnumDescriptor(_IMAGE_IMAGETYPE) _VNODEINFO = _descriptor.Descriptor( name='VNodeInfo', full_name='VNodeInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='taskid', full_name='VNodeInfo.taskid', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='username', full_name='VNodeInfo.username', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='vnodeid', full_name='VNodeInfo.vnodeid', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='vnode', full_name='VNodeInfo.vnode', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], 
oneofs=[ ], serialized_start=13, serialized_end=98, ) _REPLY = _descriptor.Descriptor( name='Reply', full_name='Reply', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='status', full_name='Reply.status', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='message', full_name='Reply.message', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ _REPLY_REPLYSTATUS, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=202, ) _REPORTMSG = _descriptor.Descriptor( name='ReportMsg', full_name='ReportMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='taskmsgs', full_name='ReportMsg.taskmsgs', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=204, serialized_end=243, ) _TASKMSG = _descriptor.Descriptor( name='TaskMsg', full_name='TaskMsg', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='taskid', full_name='TaskMsg.taskid', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='username', full_name='TaskMsg.username', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='vnodeid', full_name='TaskMsg.vnodeid', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='subTaskStatus', full_name='TaskMsg.subTaskStatus', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='token', full_name='TaskMsg.token', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='errmsg', full_name='TaskMsg.errmsg', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=245, serialized_end=368, ) _TASKINFO = _descriptor.Descriptor( name='TaskInfo', full_name='TaskInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='taskid', 
full_name='TaskInfo.taskid', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='username', full_name='TaskInfo.username', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='vnodeid', full_name='TaskInfo.vnodeid', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='parameters', full_name='TaskInfo.parameters', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='timeout', full_name='TaskInfo.timeout', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='token', full_name='TaskInfo.token', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=370, serialized_end=496, ) _PARAMETERS = 
_descriptor.Descriptor( name='Parameters', full_name='Parameters', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='command', full_name='Parameters.command', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='stderrRedirectPath', full_name='Parameters.stderrRedirectPath', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='stdoutRedirectPath', full_name='Parameters.stdoutRedirectPath', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=498, serialized_end=593, ) _COMMAND_ENVVARSENTRY = _descriptor.Descriptor( name='EnvVarsEntry', full_name='Command.EnvVarsEntry', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='Command.EnvVarsEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='Command.EnvVarsEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=689, serialized_end=735, ) _COMMAND = _descriptor.Descriptor( name='Command', full_name='Command', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='commandLine', full_name='Command.commandLine', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='packagePath', full_name='Command.packagePath', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='envVars', full_name='Command.envVars', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_COMMAND_ENVVARSENTRY, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=596, serialized_end=735, ) _VNODE = _descriptor.Descriptor( name='VNode', full_name='VNode', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='VNode.image', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='instance', full_name='VNode.instance', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mount', full_name='VNode.mount', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='network', full_name='VNode.network', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hostname', full_name='VNode.hostname', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=737, serialized_end=864, ) _NETWORK = _descriptor.Descriptor( name='Network', full_name='Network', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='ipaddr', full_name='Network.ipaddr', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gateway', full_name='Network.gateway', index=1, 
number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='masterip', full_name='Network.masterip', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='brname', full_name='Network.brname', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=866, serialized_end=942, ) _IMAGE = _descriptor.Descriptor( name='Image', full_name='Image', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', full_name='Image.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='Image.type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='owner', full_name='Image.owner', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ _IMAGE_IMAGETYPE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=944, serialized_end=1060, ) _MOUNT = _descriptor.Descriptor( name='Mount', full_name='Mount', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='provider', full_name='Mount.provider', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='localPath', full_name='Mount.localPath', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='remotePath', full_name='Mount.remotePath', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='accessKey', full_name='Mount.accessKey', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='secretKey', full_name='Mount.secretKey', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, 
file=DESCRIPTOR), _descriptor.FieldDescriptor( name='other', full_name='Mount.other', index=5, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1062, serialized_end=1179, ) _INSTANCE = _descriptor.Descriptor( name='Instance', full_name='Instance', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='cpu', full_name='Instance.cpu', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memory', full_name='Instance.memory', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='disk', full_name='Instance.disk', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gpu', full_name='Instance.gpu', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1181, serialized_end=1247, ) 
_VNODEINFO.fields_by_name['vnode'].message_type = _VNODE _REPLY.fields_by_name['status'].enum_type = _REPLY_REPLYSTATUS _REPLY_REPLYSTATUS.containing_type = _REPLY _REPORTMSG.fields_by_name['taskmsgs'].message_type = _TASKMSG _TASKMSG.fields_by_name['subTaskStatus'].enum_type = _STATUS _TASKINFO.fields_by_name['parameters'].message_type = _PARAMETERS _PARAMETERS.fields_by_name['command'].message_type = _COMMAND _COMMAND_ENVVARSENTRY.containing_type = _COMMAND _COMMAND.fields_by_name['envVars'].message_type = _COMMAND_ENVVARSENTRY _VNODE.fields_by_name['image'].message_type = _IMAGE _VNODE.fields_by_name['instance'].message_type = _INSTANCE _VNODE.fields_by_name['mount'].message_type = _MOUNT _VNODE.fields_by_name['network'].message_type = _NETWORK _IMAGE.fields_by_name['type'].enum_type = _IMAGE_IMAGETYPE _IMAGE_IMAGETYPE.containing_type = _IMAGE DESCRIPTOR.message_types_by_name['VNodeInfo'] = _VNODEINFO DESCRIPTOR.message_types_by_name['Reply'] = _REPLY DESCRIPTOR.message_types_by_name['ReportMsg'] = _REPORTMSG DESCRIPTOR.message_types_by_name['TaskMsg'] = _TASKMSG DESCRIPTOR.message_types_by_name['TaskInfo'] = _TASKINFO DESCRIPTOR.message_types_by_name['Parameters'] = _PARAMETERS DESCRIPTOR.message_types_by_name['Command'] = _COMMAND DESCRIPTOR.message_types_by_name['VNode'] = _VNODE DESCRIPTOR.message_types_by_name['Network'] = _NETWORK DESCRIPTOR.message_types_by_name['Image'] = _IMAGE DESCRIPTOR.message_types_by_name['Mount'] = _MOUNT DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE DESCRIPTOR.enum_types_by_name['Status'] = _STATUS _sym_db.RegisterFileDescriptor(DESCRIPTOR) VNodeInfo = _reflection.GeneratedProtocolMessageType('VNodeInfo', (_message.Message,), dict( DESCRIPTOR = _VNODEINFO, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:VNodeInfo) )) _sym_db.RegisterMessage(VNodeInfo) Reply = _reflection.GeneratedProtocolMessageType('Reply', (_message.Message,), dict( DESCRIPTOR = _REPLY, __module__ = 'rpc_pb2' # 
@@protoc_insertion_point(class_scope:Reply) )) _sym_db.RegisterMessage(Reply) ReportMsg = _reflection.GeneratedProtocolMessageType('ReportMsg', (_message.Message,), dict( DESCRIPTOR = _REPORTMSG, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:ReportMsg) )) _sym_db.RegisterMessage(ReportMsg) TaskMsg = _reflection.GeneratedProtocolMessageType('TaskMsg', (_message.Message,), dict( DESCRIPTOR = _TASKMSG, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:TaskMsg) )) _sym_db.RegisterMessage(TaskMsg) TaskInfo = _reflection.GeneratedProtocolMessageType('TaskInfo', (_message.Message,), dict( DESCRIPTOR = _TASKINFO, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:TaskInfo) )) _sym_db.RegisterMessage(TaskInfo) Parameters = _reflection.GeneratedProtocolMessageType('Parameters', (_message.Message,), dict( DESCRIPTOR = _PARAMETERS, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Parameters) )) _sym_db.RegisterMessage(Parameters) Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict( EnvVarsEntry = _reflection.GeneratedProtocolMessageType('EnvVarsEntry', (_message.Message,), dict( DESCRIPTOR = _COMMAND_ENVVARSENTRY, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Command.EnvVarsEntry) )) , DESCRIPTOR = _COMMAND, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Command) )) _sym_db.RegisterMessage(Command) _sym_db.RegisterMessage(Command.EnvVarsEntry) VNode = _reflection.GeneratedProtocolMessageType('VNode', (_message.Message,), dict( DESCRIPTOR = _VNODE, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:VNode) )) _sym_db.RegisterMessage(VNode) Network = _reflection.GeneratedProtocolMessageType('Network', (_message.Message,), dict( DESCRIPTOR = _NETWORK, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Network) )) _sym_db.RegisterMessage(Network) Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict( DESCRIPTOR = 
_IMAGE, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Image) )) _sym_db.RegisterMessage(Image) Mount = _reflection.GeneratedProtocolMessageType('Mount', (_message.Message,), dict( DESCRIPTOR = _MOUNT, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Mount) )) _sym_db.RegisterMessage(Mount) Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( DESCRIPTOR = _INSTANCE, __module__ = 'rpc_pb2' # @@protoc_insertion_point(class_scope:Instance) )) _sym_db.RegisterMessage(Instance) _COMMAND_ENVVARSENTRY.has_options = True _COMMAND_ENVVARSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) _MASTER = _descriptor.ServiceDescriptor( name='Master', full_name='Master', file=DESCRIPTOR, index=0, options=None, serialized_start=1342, serialized_end=1382, methods=[ _descriptor.MethodDescriptor( name='report', full_name='Master.report', index=0, containing_service=None, input_type=_REPORTMSG, output_type=_REPLY, options=None, ), ]) _sym_db.RegisterServiceDescriptor(_MASTER) DESCRIPTOR.services_by_name['Master'] = _MASTER _WORKER = _descriptor.ServiceDescriptor( name='Worker', full_name='Worker', file=DESCRIPTOR, index=1, options=None, serialized_start=1385, serialized_end=1535, methods=[ _descriptor.MethodDescriptor( name='start_vnode', full_name='Worker.start_vnode', index=0, containing_service=None, input_type=_VNODEINFO, output_type=_REPLY, options=None, ), _descriptor.MethodDescriptor( name='start_task', full_name='Worker.start_task', index=1, containing_service=None, input_type=_TASKINFO, output_type=_REPLY, options=None, ), _descriptor.MethodDescriptor( name='stop_task', full_name='Worker.stop_task', index=2, containing_service=None, input_type=_TASKINFO, output_type=_REPLY, options=None, ), _descriptor.MethodDescriptor( name='stop_vnode', full_name='Worker.stop_vnode', index=3, containing_service=None, input_type=_VNODEINFO, output_type=_REPLY, options=None, ), ]) 
_sym_db.RegisterServiceDescriptor(_WORKER) DESCRIPTOR.services_by_name['Worker'] = _WORKER # @@protoc_insertion_point(module_scope) ================================================ FILE: src/protos/rpc_pb2_grpc.py ================================================ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc from protos import rpc_pb2 as rpc__pb2 class MasterStub(object): # missing associated documentation comment in .proto file pass def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.report = channel.unary_unary( '/Master/report', request_serializer=rpc__pb2.ReportMsg.SerializeToString, response_deserializer=rpc__pb2.Reply.FromString, ) class MasterServicer(object): # missing associated documentation comment in .proto file pass def report(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_MasterServicer_to_server(servicer, server): rpc_method_handlers = { 'report': grpc.unary_unary_rpc_method_handler( servicer.report, request_deserializer=rpc__pb2.ReportMsg.FromString, response_serializer=rpc__pb2.Reply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'Master', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) class WorkerStub(object): # missing associated documentation comment in .proto file pass def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.start_vnode = channel.unary_unary( '/Worker/start_vnode', request_serializer=rpc__pb2.VNodeInfo.SerializeToString, response_deserializer=rpc__pb2.Reply.FromString, ) self.start_task = channel.unary_unary( '/Worker/start_task', request_serializer=rpc__pb2.TaskInfo.SerializeToString, response_deserializer=rpc__pb2.Reply.FromString, ) self.stop_task = channel.unary_unary( '/Worker/stop_task', request_serializer=rpc__pb2.TaskInfo.SerializeToString, response_deserializer=rpc__pb2.Reply.FromString, ) self.stop_vnode = channel.unary_unary( '/Worker/stop_vnode', request_serializer=rpc__pb2.VNodeInfo.SerializeToString, response_deserializer=rpc__pb2.Reply.FromString, ) class WorkerServicer(object): # missing associated documentation comment in .proto file pass def start_vnode(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def start_task(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def stop_task(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def stop_vnode(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_WorkerServicer_to_server(servicer, server): rpc_method_handlers = { 'start_vnode': grpc.unary_unary_rpc_method_handler( servicer.start_vnode, request_deserializer=rpc__pb2.VNodeInfo.FromString, 
response_serializer=rpc__pb2.Reply.SerializeToString, ), 'start_task': grpc.unary_unary_rpc_method_handler( servicer.start_task, request_deserializer=rpc__pb2.TaskInfo.FromString, response_serializer=rpc__pb2.Reply.SerializeToString, ), 'stop_task': grpc.unary_unary_rpc_method_handler( servicer.stop_task, request_deserializer=rpc__pb2.TaskInfo.FromString, response_serializer=rpc__pb2.Reply.SerializeToString, ), 'stop_vnode': grpc.unary_unary_rpc_method_handler( servicer.stop_vnode, request_deserializer=rpc__pb2.VNodeInfo.FromString, response_serializer=rpc__pb2.Reply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'Worker', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) ================================================ FILE: src/utils/env.py ================================================ import os,netifaces def getenv(key): if key == "CLUSTER_NAME": return os.environ.get("CLUSTER_NAME", "docklet-vc") elif key == "FS_PREFIX": return os.environ.get("FS_PREFIX", "/opt/docklet") elif key == "CLUSTER_SIZE": return int(os.environ.get("CLUSTER_SIZE", 1)) elif key == "CLUSTER_NET": return os.environ.get("CLUSTER_NET", "172.16.0.1/16") elif key == "CONTAINER_CPU": return int(os.environ.get("CONTAINER_CPU", 100000)) elif key == "CONTAINER_DISK": return int(os.environ.get("CONTAINER_DISK", 1000)) elif key == "CONTAINER_MEMORY": return int(os.environ.get("CONTAINER_MEMORY", 1000)) elif key == "DISKPOOL_SIZE": return int(os.environ.get("DISKPOOL_SIZE", 10000)) elif key == "ETCD": return os.environ.get("ETCD", "localhost:2379") elif key == "NETWORK_DEVICE": return os.environ.get("NETWORK_DEVICE", "eth0") elif key == "MASTER_IP": return os.environ.get("MASTER_IP", "0.0.0.0") elif key == "MASTER_IPS": return os.environ.get("MASTER_IPS", "0.0.0.0@docklet") elif key == "MASTER_PORT": return int(os.environ.get("MASTER_PORT", 9000)) elif key == "WORKER_PORT": return int(os.environ.get("WORKER_PORT", 9001)) elif key == 
"NGINX_PORT": return int(os.environ.get("NGINX_PORT", 8080)) elif key == "PROXY_PORT": return int(os.environ.get("PROXY_PORT", 8000)) elif key == "PROXY_API_PORT": return int(os.environ.get("PROXY_API_PORT", 8001)) elif key == "WEB_PORT": return int(os.environ.get("WEB_PORT", 8888)) elif key == "PORTAL_URL": return os.environ.get("PORTAL_URL", "http://"+getenv("MASTER_IP") + ":" + str(getenv("NGINX_PORT"))) elif key == "LOG_LEVEL": return os.environ.get("LOG_LEVEL", "DEBUG") elif key == "LOG_LIFE": return int(os.environ.get("LOG_LIFE", 10)) elif key == "WEB_LOG_LEVEL": return os.environ.get("WEB_LOG_LEVEL", "DEBUG") elif key == "STORAGE": return os.environ.get("STORAGE", "file") elif key =="EXTERNAL_LOGIN": return os.environ.get("EXTERNAL_LOGIN", "False") elif key =="DATA_QUOTA": return os.environ.get("DATA_QUOTA", "False") elif key =="DATA_QUOTA_CMD": return os.environ.get("DATA_QUOTA_CMD", "gluster volume quota docklet-volume limit-usage %s %s") elif key == 'DISTRIBUTED_GATEWAY': return os.environ.get("DISTRIBUTED_GATEWAY", "False") elif key == "PUBLIC_IP": device = os.environ.get("NETWORK_DEVICE","eth0") addr = netifaces.ifaddresses(device) if 2 in addr: return os.environ.get("PUBLIC_IP",addr[2][0]['addr']) else: return os.environ.get("PUBLIC_IP","0.0.0.0") elif key == "NGINX_CONF": return os.environ.get("NGINX_CONF","/etc/nginx") elif key =="USER_IP": return os.environ.get("USER_IP","0.0.0.0") elif key =="USER_PORT": return int(os.environ.get("USER_PORT",9100)) elif key =="AUTH_KEY": return os.environ.get("AUTH_KEY","docklet") elif key =="OPEN_REGISTRY": return os.environ.get("OPEN_REGISTRY","False") elif key =="APPROVAL_RBT": return os.environ.get("APPROVAL_RBT","ON") elif key =="ALLOCATED_PORTS": return os.environ.get("ALLOCATED_PORTS","10000-65535") elif key =="ALLOW_SCALE_OUT": return os.environ.get("ALLOW_SCALE_OUT", "False") elif key == "WARNING_DAYS": return os.environ.get("WARNING_DAYS", "7") elif key == "RELEASE_DAYS": return 
os.environ.get("RELEASE_DAYS", "14") elif key == "BATCH_ON": return os.environ.get("BATCH_ON","True") elif key == "BATCH_MASTER_PORT": return os.environ.get("BATCH_MASTER_PORT","50050") elif key == "BATCH_WORKER_PORT": return os.environ.get("BATCH_WORKER_PORT","50051") elif key == "BATCH_TASK_CIDR": return os.environ.get("BATCH_TASK_CIDR","4") elif key == "BATCH_NET": return os.environ.get("BATCH_NET","10.16.0.0/16") elif key == "BATCH_MAX_THREAD_WORKER": return os.environ.get("BATCH_MAX_THREAD_WORKER","5") else: return os.environ.get(key,"") ================================================ FILE: src/utils/etcdlib.py ================================================ #!/usr/bin/python3 ############################################################ # etcdlib.py -- etcdlib provides a python etcd client # author : Bao Li , UniAS, SEI, PKU # license : BSD License ############################################################ import urllib.request, urllib.error import random, json, time #import sys # send http request to etcd server and get the json result # url : url # data : data to send by POST/PUT # method : method used by http request def dorequest(url, data = "", method = 'GET'): try: if method == 'GET': response = urllib.request.urlopen(url, timeout=10).read() else: # use PUT/DELETE/POST, data should be encoded in ascii/bytes request = urllib.request.Request(url, data = data.encode('ascii'), method = method) response = urllib.request.urlopen(request, timeout=10).read() # etcd may return json result with response http error code # http error code will raise exception in urlopen # catch the HTTPError and get the json result except urllib.error.HTTPError as e: # e.fp must be read() in this except block. # the e will be deleted and e.fp will be closed after this block response = e.fp.read() # response is encoded in bytes. 
    # recoded in utf-8 and loaded in json
    result = json.loads(str(response, encoding='utf-8'))
    return result

# client to use etcd
# not all APIs are implemented below. just implement what we want
class Client(object):
    # server is a string of one server IP and PORT, like 192.168.4.12:2379
    def __init__(self, server, prefix = ""):
        # random client id: used as the lock-owner token by acquire()/release()
        self.clientid = str(random.random())
        self.server = "http://"+server
        # all keys live under /v2/keys/<prefix>/ so several clients can share
        # one etcd without colliding
        prefix = prefix.strip("/")
        if prefix == "":
            self.keysurl = self.server+"/v2/keys/"
        else:
            self.keysurl = self.server+"/v2/keys/"+prefix+"/"
        # NOTE(review): contacts etcd during construction; raises if the
        # server is unreachable
        self.members = self.getmembers()

    # query the etcd v2 members API; returns one client URL per cluster member
    def getmembers(self):
        out = dorequest(self.server+"/v2/members")
        result = []
        for one in out['members']:
            result.append(one['clientURLs'][0])
        return result

    # list etcd servers
    def listmembers(self):
        return self.members

    # delete everything under this client's prefix and recreate it empty
    def clean(self):
        [baseurl, dirname] = self.keysurl.split("/v2/keys/", maxsplit=1)
        dirname = dirname.strip("/")
        if dirname == '':
            # clean root content
            [status, result] = self.listdir("")
            if status:
                for one in result:
                    if 'dir' in one:
                        self.deldir(one['key'])
                    else:
                        self.delkey(one['key'])
            # NOTE(review): isdir() returns a [bool, msg] list, which is
            # always truthy -- this deldir runs even when _lock is absent
            if self.isdir("_lock"):
                self.deldir("_lock")
        else:
            # clean a directory
            if self.isdir("")[0]:
                self.deldir("")
            self.createdir("")

    # read a key; returns [True, value] or [False, reason].
    # a successful etcd v2 response always carries an 'action' field, which is
    # how every method below distinguishes success from an error payload
    def getkey(self, key):
        key = key.strip("/")
        out = dorequest(self.keysurl+key)
        if 'action' not in out:
            return [False, "key not found"]
        else:
            return [True, out['node']['value']]

    # set key to value; ttl is in seconds, 0 means no expiration
    def setkey(self, key, value, ttl=0):
        key = key.strip("/")
        if ttl == 0:
            out = dorequest(self.keysurl+key, 'value='+str(value), 'PUT')
        else:
            out = dorequest(self.keysurl+key, 'value='+str(value)+"&ttl="+str(ttl), 'PUT')
        if 'action' not in out:
            return [False, 'set key failed']
        else:
            return [True, out['node']['value']]

    # delete a single key; returns [True, key] or [False, reason]
    def delkey(self, key):
        key = key.strip("/")
        out = dorequest(self.keysurl+key, method='DELETE')
        if 'action' not in out:
            return [False, 'delete key failed']
        else:
            return [True, out['node']['key']]

    # test whether dirname is a directory; returns [bool, message]
    def isdir(self, dirname):
        dirname = dirname.strip("/")
        out = dorequest(self.keysurl+dirname)
        if 'action' not in out:
            return [False, dirname+" not found"]
        if 'dir' not in out['node']:
            return [False, dirname+" is a key"]
        return [True, dirname]

    # create an (empty) directory; returns [True, key] or [False, reason]
    def createdir(self, dirname):
        dirname = dirname.strip("/")
        out = dorequest(self.keysurl+dirname, 'dir=true', 'PUT')
        if 'action' not in out:
            return [False, 'create dir failed']
        else:
            return [True, out['node']['key']]

    # list key-value in the directory. BUT not recursive.
    # if necessary, recursive can be supported by add ?recursive=true in url
    def listdir(self, dirname):
        dirname = dirname.strip("/")
        out = dorequest(self.keysurl+dirname)
        if 'action' not in out:
            return [False, 'list directory failed']
        else:
            if "dir" not in out['node']:
                return [False, dirname+" is a key"]
            # an empty directory carries no 'nodes' entry at all
            if 'nodes' not in out['node']:
                return [True, []]
            result=[]
            for kv in out['node']['nodes']:
                if 'dir' in kv:
                    result.append({"key":kv['key'], 'dir':True})
                else:
                    result.append({"key":kv['key'], 'value':kv['value']})
            return [True, result]

    # del directory with recursive=true
    def deldir(self, dirname):
        dirname = dirname.strip("/")
        out = dorequest(self.keysurl+dirname+"?recursive=true", method='DELETE')
        if 'action' not in out:
            return [False, 'delete directory failed']
        else:
            return [True, out['node']['key']]

    # watch a key or directory when it changes.
    # recursive=true means anything in the directory changes, it will return
    # NOTE(review): blocks until a change happens (long-poll), subject to the
    # 10s timeout inside dorequest
    def watch(self, key):
        key = key.strip("/")
        out = dorequest(self.keysurl+key+"?wait=true&recursive=true")
        if 'action' not in out:
            return [False, 'watch key failed']
        else:
            return [True, out['node']['value']]

    # atomic create a key. return immediately with True or False
    # prevExist=false makes etcd reject the PUT when the key already exists,
    # which is the primitive the lock below is built on
    def atomiccreate(self, key, value='atom'):
        key = key.strip("/")
        out = dorequest(self.keysurl+key+"?prevExist=false", 'value='+value, method='PUT')
        if 'action' not in out:
            return [False, 'atomic create key failed']
        else:
            return [True, out['node']['key']]

    ################# Lock ##################
    # lockref(key) : get a reference of a lock named key in etcd.
    #                not need to create this lock.
it is automatical. # acquire(lockref) : acquire this lock by lockref. # blocked if lock is holded by others # release(lockref) : release this lock by lockref # only can be released by holder ######################################### def lockref(self, key): key = key.strip("/") return "_lock/"+key def acquire(self, lockref): while(True): if self.atomiccreate(lockref, self.clientid)[0]: return [True, 'get lock'] else: time.sleep(0.01) def release(self, lockref): value = self.getkey(lockref) if value[0]: if value[1] == self.clientid: self.delkey(lockref) return [True, 'release lock'] else: return [False, 'you are not lock holder'] else: return [False, 'no one holds this lock'] ================================================ FILE: src/utils/gputools.py ================================================ import lxc import subprocess import os import signal from utils.log import logger # Note: keep physical device id always the same as the virtual device id # device_path e.g. /dev/nvidia0 def add_device(container_name, device_path): c = lxc.Container(container_name) return c.add_device_node(device_path, device_path) def remove_device(container_name, device_path): c = lxc.Container(container_name) return c.remove_device_node('', device_path) # Mon May 21 10:51:45 2018 # +-----------------------------------------------------------------------------+ # | NVIDIA-SMI 381.22 Driver Version: 381.22 | # |-------------------------------+----------------------+----------------------+ # | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | # | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | # |===============================+======================+======================| # | 0 GeForce GTX 108... Off | 0000:02:00.0 Off | N/A | # | 33% 53C P2 59W / 250W | 295MiB / 11172MiB | 2% Default | # +-------------------------------+----------------------+----------------------+ # | 1 GeForce GTX 108... 
Off | 0000:84:00.0 Off | N/A | # | 21% 35C P8 10W / 250W | 161MiB / 11172MiB | 0% Default | # +-------------------------------+----------------------+----------------------+ # # +-----------------------------------------------------------------------------+ # | Processes: GPU Memory | # | GPU PID Type Process name Usage | # |=============================================================================| # | 0 111893 C python3 285MiB | # | 1 111893 C python3 151MiB | # +-----------------------------------------------------------------------------+ # def nvidia_smi(args=[]): try: cmd = ['nvidia-smi'] cmd.extend(args) ret = subprocess.check_output(cmd, stderr=subprocess.STDOUT, timeout=5) return ret.split('\n') except subprocess.TimeoutExpired: return None except subprocess.CalledProcessError: return None except Exception as e: return None def get_gpu_driver_version(): output = nvidia_smi() if not output: return None else: return output[2].split()[-2] # GPU 0: GeForce GTX 1080 Ti (UUID: GPU-a1c9b91b-5fb2-6059-9784-29ae78cdba8f) # GPU 1: GeForce GTX 1080 Ti (UUID: GPU-36a9e2ff-b71d-8601-d0c5-72e0ec72564b) def get_gpu_names(): output = nvidia_smi(['-L']) if not output: return [] gpu_names = [] for line in output: start = line.find(':') + 1 end = line.find('(') name = line[start:end].strip().replace(' ', '-') if name: gpu_names.append(name) return gpu_names def get_gpu_status(): output = nvidia_smi() if not output: return [] interval_index = [index for index in range(len(output)) if len(output[index].strip()) == 0][0] status_list = [] for index in range(7, interval_index, 3): status = {} status['id'] = output[index].split()[1] sp = output[index+1].split() status['fan'] = sp[1] status['memory'] = sp[8] status['memory_max'] = sp[10] status['util'] = sp[12] status_list.append(status) return status_list def get_gpu_processes(): output = nvidia_smi() if not output: return [] interval_index = [index for index in range(len(output)) if len(output[index].strip()) == 0][0] 
process_list = [] for index in range(interval_index + 5, len(output)): sp = output[index].split() if len(sp) != 7: break process = {} process['gpu'] = sp[1] process['pid'] = sp[2] process['name'] = sp[4] process['memory'] = sp[5] process['container'] = get_container_name_by_pid(sp[2]) process_list.append(process) return process_list def get_container_name_by_pid(pid): with open('/proc/%s/cgroup' % pid) as f: content = f.readlines()[0].strip().split('/') if content[1] != 'lxc': return 'host' else: return content[2] return None def clean_up_processes_in_gpu(gpu_id): logger.info('[gputools] start clean up processes in gpu %d' % gpu_id) processes = get_gpu_processes() for process in [p for p in processes if p['gpu'] == gpu_id]: logger.info('[gputools] find process %d running in gpu %d' % (process['pid'], process['gpu'])) if process['container'] == 'host': logger.warning('[gputools] find process of host, ignored') else: logger.warning('[gputools] find process of container [%s], killed' % process['container']) try: os.kill(process['pid'], signal.SIGKILL) except OSError: continue ================================================ FILE: src/utils/imagemgr.py ================================================ #!/usr/bin/python3 """ design: 1. When user create an image, it will upload to an image server, at the same time, local host will save an image. A time file will be made with them. Everytime a container start by this image, the time file will update. 2. When user save an image, if it is a update option, it will faster than create a new image. 3. At image server and every physical host, run a shell script to delete the image, which is out of time. 4. We can show every user their own images and the images are shared by other. User can new a cluster or scale out a new node by them. And user can remove his own images. 5. When a remove option occur, the image server will delete it. But some physical host may also maintain it. I think it doesn't matter. 6. 
The manage of lvm has been including in this module. """ from configparser import ConfigParser from io import StringIO import os,sys,subprocess,time,re,datetime,threading,random import xmlrpc.client from utils.model import db, Image from utils.log import logger from utils import env, updatebase from utils.lvmtool import * import requests master_port = str(env.getenv('MASTER_PORT')) class ImageMgr(): #def sys_call(self,command): # output = subprocess.getoutput(command).strip() # return None if output == '' else output def sys_return(self,command): return_value = subprocess.call(command,shell=True) return return_value def __init__(self): self.NFS_PREFIX = env.getenv('FS_PREFIX') self.imgpath = self.NFS_PREFIX + "/global/images/" self.srcpath = env.getenv('DOCKLET_LIB') + "/" self.imageserver = "192.168.6.249" def datetime_toString(self,dt): return dt.strftime("%Y-%m-%d %H:%M:%S") def string_toDatetime(self,string): return datetime.datetime.strptime(string, "%Y-%m-%d %H:%M:%S") def updateinfo(self,user,imagename,description): '''image_info_file = open(imgpath+"."+image+".info",'w') image_info_file.writelines([self.datetime_toString(datetime.datetime.now()) + "\n", "unshare"]) image_info_file.close() image_description_file = open(imgpath+"."+image+".description", 'w') image_description_file.write(description) image_description_file.close()''' image = Image.query.filter_by(ownername=user,imagename=imagename).first() if image is None: newimage = Image(imagename,True,False,user,description) db.session.add(newimage) db.session.commit() def dealpath(self,fspath): if fspath[-1:] == "/": return self.dealpath(fspath[:-1]) else: return fspath def createImage(self,user,image,lxc,description="Not thing", imagenum=10): fspath = self.NFS_PREFIX + "/local/volume/" + lxc imgpath = self.imgpath + "private/" + user + "/" #tmppath = self.NFS_PREFIX + "/local/tmpimg/" #tmpimage = str(random.randint(0,10000000)) + ".tz" if not os.path.exists(imgpath+image) and os.path.exists(imgpath): 
cur_imagenum = 0 for filename in os.listdir(imgpath): if os.path.isdir(imgpath+filename): cur_imagenum += 1 if cur_imagenum >= int(imagenum): return [False,"image number limit exceeded"] #sys_run("mkdir -p %s" % tmppath, True) sys_run("mkdir -p %s" % imgpath,True) try: sys_run("tar -cvf %s -C %s ." % (imgpath+image+".tz",self.dealpath(fspath)), True) except Exception as e: logger.error(e) #try: #sys_run("cp %s %s" % (tmppath+tmpimage, imgpath+image+".tz"), True) #sys_run("rsync -a --delete --exclude=lost+found/ --exclude=root/nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (self.dealpath(fspath),imgpath+image),True) #except Exception as e: # logger.error(e) #sys_run("rm -f %s" % tmppath+tmpimage, True) #sys_run("rm -f %s" % (imgpath+"."+image+"_docklet_share"),True) self.updateinfo(user,image,description) logger.info("image:%s from LXC:%s create success" % (image,lxc)) return [True, "create image success"] def prepareImage(self,user,image,fspath): imagename = image['name'] imagetype = image['type'] imageowner = image['owner'] #tmppath = self.NFS_PREFIX + "/local/tmpimg/" #tmpimage = str(random.randint(0,10000000)) + ".tz" if imagename == "base" and imagetype == "base": return if imagetype == "private": imgpath = self.imgpath + "private/" + user + "/" else: imgpath = self.imgpath + "public/" + imageowner + "/" #try: # sys_run("cp %s %s" % (imgpath+imagename+".tz", tmppath+tmpimage)) #except Exception as e: # logger.error(e) try: sys_run("tar -C %s -xvf %s" % (self.dealpath(fspath),imgpath+imagename+".tz"), True) #sys_run("rsync -a --delete --exclude=lost+found/ --exclude=root/nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (imgpath+imagename,self.dealpath(fspath)),True) except Exception as e: logger.error(e) #sys_run("rm -f %s" % tmppath+tmpimage) #self.sys_call("rsync -a --delete --exclude=nfs/ %s/ %s/" % (imgpath+image,self.dealpath(fspath))) 
#self.updatetime(imgpath,image) return def prepareFS(self,user,image,lxc,size="1000",vgname="docklet-group"): rootfs = "/var/lib/lxc/%s/rootfs" % lxc layer = self.NFS_PREFIX + "/local/volume/" + lxc #check mountpoint Ret = sys_run("mountpoint %s" % rootfs) if Ret.returncode == 0: logger.info("%s not clean" % rootfs) sys_run("umount -l %s" % rootfs) Ret = sys_run("mountpoint %s" % layer) if Ret.returncode == 0: logger.info("%s not clean" % layer) sys_run("umount -l %s" % layer) try: sys_run("rm -rf %s %s" % (rootfs, layer)) sys_run("mkdir -p %s %s" % (rootfs, layer)) except Exception as e: logger.error(e) #prepare volume if check_volume(vgname,lxc): logger.info("volume %s already exists, delete it") delete_volume(vgname,lxc) if not new_volume(vgname,lxc,size): logger.error("volume %s create failed" % lxc) return False try: sys_run("mkfs.ext4 /dev/%s/%s" % (vgname,lxc),True) sys_run("mount /dev/%s/%s %s" %(vgname,lxc,layer),True) #self.sys_call("mkdir -p %s/overlay %s/work" % (layer,layer)) #self.sys_call("mount -t overlay overlay -olowerdir=%s/local/basefs,upperdir=%s/overlay,workdir=%s/work %s" % (self.NFS_PREFIX,layer,layer,rootfs)) #self.prepareImage(user,image,layer+"/overlay") self.prepareImage(user,image,layer) logger.info("image has been prepared") sys_run("mount -t aufs -o br=%s=rw:%s/local/packagefs=ro+wh:%s/local/basefs=ro+wh -o udba=reval none %s/" % (layer,self.NFS_PREFIX,self.NFS_PREFIX,rootfs),True) sys_run("mkdir -m 777 -p %s/local/temp/%s" % (self.NFS_PREFIX,lxc)) except Exception as e: logger.error(e) logger.info("FS has been prepared for user:%s lxc:%s" % (user,lxc)) return True def deleteFS(self,lxc,vgname="docklet-group"): rootfs = "/var/lib/lxc/%s/rootfs" % lxc layer = self.NFS_PREFIX + "/local/volume/" + lxc lxcpath = "/var/lib/lxc/%s" % lxc sys_run("lxc-stop -k -n %s" % lxc) #check mountpoint Ret = sys_run("mountpoint %s" % rootfs) if Ret.returncode == 0: sys_run("umount -l %s" % rootfs) Ret = sys_run("mountpoint %s" % layer) if Ret.returncode 
== 0: sys_run("umount -l %s" % layer) if check_volume(vgname, lxc): delete_volume(vgname, lxc) try: sys_run("rm -rf %s %s" % (layer,lxcpath)) sys_run("rm -rf %s/local/temp/%s" % (self.NFS_PREFIX,lxc)) except Exception as e: logger.error(e) return True def detachFS(self, lxc, vgname="docklet-group"): rootfs = "/var/lib/lxc/%s/rootfs" % lxc Ret = sys_run("umount %s" % rootfs) if Ret.returncode != 0: logger.error("cannot umount rootfs:%s" % rootfs) return False return True def checkFS(self, lxc, vgname="docklet-group"): rootfs = "/var/lib/lxc/%s/rootfs" % lxc layer = self.NFS_PREFIX + "/local/volume/" + lxc if not os.path.isdir(layer): sys_run("mkdir -p %s" % layer) #check mountpoint Ret = sys_run("mountpoint %s" % layer) if Ret.returncode != 0: sys_run("mount /dev/%s/%s %s" % (vgname,lxc,layer)) Ret = sys_run("mountpoint %s" % rootfs) if Ret.returncode != 0: sys_run("mount -t aufs -o br=%s=rw:%s/local/packagefs=ro+wh:%s/local/basefs=ro+wh -o udba=reval none %s/" % (layer,self.NFS_PREFIX,self.NFS_PREFIX,rootfs)) return True def removeImage(self,user,imagename): imgpath = self.imgpath + "private/" + user + "/" try: image = Image.query.filter_by(imagename=imagename,ownername=user).first() image.hasPrivate = False if image.hasPublic == False: db.session.delete(image) db.session.commit() sys_run("rm -rf %s/" % imgpath+imagename+".tz", True) #sys_run("rm -f %s" % imgpath+"."+image+".info", True) #sys_run("rm -f %s" % (imgpath+"."+image+".description"), True) except Exception as e: logger.error(e) def shareImage(self,user,imagename): imgpath = self.imgpath + "private/" + user + "/" share_imgpath = self.imgpath + "public/" + user + "/" '''image_info_file = open(imgpath+"."+image+".info", 'r') [createtime, isshare] = image_info_file.readlines() isshare = "shared" image_info_file.close() image_info_file = open(imgpath+"."+image+".info", 'w') image_info_file.writelines([createtime, isshare]) image_info_file.close()''' try: image = 
Image.query.filter_by(imagename=imagename,ownername=user).first() if image.hasPublic == True: return image.hasPublic = True db.session.commit() sys_run("mkdir -p %s" % share_imgpath, True) sys_run("cp %s %s" % (imgpath+imagename+".tz", share_imgpath+imagename+".tz"), True) #sys_run("rsync -a --delete %s/ %s/" % (imgpath+image,share_imgpath+image), True) except Exception as e: logger.error(e) #$sys_run("cp %s %s" % (imgpath+"."+image+".info",share_imgpath+"."+image+".info"), True) #sys_run("cp %s %s" % (imgpath+"."+image+".description",share_imgpath+"."+image+".description"), True) def unshareImage(self,user,imagename): public_imgpath = self.imgpath + "public/" + user + "/" imgpath = self.imgpath + "private/" + user + "/" '''if os.path.isfile(imgpath + image + ".tz"): image_info_file = open(imgpath+"."+image+".info", 'r') [createtime, isshare] = image_info_file.readlines() isshare = "unshare" image_info_file.close() image_info_file = open(imgpath+"."+image+".info", 'w') image_info_file.writelines([createtime, isshare]) image_info_file.close()''' try: #sys_run("rm -rf %s/" % public_imgpath+image, True) image = Image.query.filter_by(imagename=imagename,ownername=user).first() image.hasPublic = False if image.hasPrivate == False: db.session.delete(image) db.session.commit() sys_run("rm -f %s" % public_imgpath+imagename+".tz", True) #sys_run("rm -f %s" % public_imgpath+"."+image+".info", True) #sys_run("rm -f %s" % public_imgpath+"."+image+".description", True) except Exception as e: logger.error(e) def copyImage(self,user,image,token,target): path = "/opt/docklet/global/images/private/"+user+"/" '''image_info_file = open(path+"."+image+".info", 'r') [createtime, isshare] = image_info_file.readlines() recordshare = isshare isshare = "unshared" image_info_file.close() image_info_file = open(path+"."+image+".info", 'w') image_info_file.writelines([createtime, isshare]) image_info_file.close()''' try: sys_run('ssh root@%s "mkdir -p %s"' % (target,path)) sys_run('scp 
%s%s.tz root@%s:%s' % (path,image,target,path)) #sys_run('scp %s.%s.description root@%s:%s' % (path,image,target,path)) #sys_run('scp %s.%s.info root@%s:%s' % (path,image,target,path)) resimage = Image.query.filter_by(ownername=user,imagename=image).first() auth_key = env.getenv('AUTH_KEY') url = "http://" + target + ":" + master_port + "/image/copytarget/" data = {"token":token,"auth_key":auth_key,"user":user,"imagename":image,"description":resimage.description} result = requests.post(url, data=data).json() logger.info("Response from target master: " + str(result)) except Exception as e: logger.error(e) '''image_info_file = open(path+"."+image+".info", 'w') image_info_file.writelines([createtime, recordshare]) image_info_file.close()''' return {'success':'false', 'message':str(e)} '''image_info_file = open(path+"."+image+".info", 'w') image_info_file.writelines([createtime, recordshare]) image_info_file.close()''' logger.info("copy image %s of %s to %s success" % (image,user,target)) return {'success':'true', 'action':'copy image'} def update_basefs(self,imagename): imgpath = self.imgpath + "private/root/" basefs = self.NFS_PREFIX+"/local/packagefs/" tmppath = self.NFS_PREFIX + "/local/tmpimg/" tmpimage = str(random.randint(0,10000000)) try: sys_run("mkdir -p %s" % tmppath+tmpimage) sys_run("tar -C %s -xvf %s" % (tmppath+tmpimage,imgpath+imagename+".tz"),True) logger.info("start updating base image") updatebase.aufs_update_base(tmppath+tmpimage, basefs) logger.info("update base image success") except Exception as e: logger.error(e) sys_run("rm -rf %s" % tmppath+tmpimage) return True def update_base_image(self, user, vclustermgr, image): if not user == "root": logger.info("only root can update base image") #vclustermgr.stop_allclusters() #vclustermgr.detach_allclusters() workers = vclustermgr.nodemgr.get_nodeips() logger.info("update base image in all workers") for worker in workers: workerrpc = vclustermgr.nodemgr.ip_to_rpc(worker) workerrpc.update_basefs(image) 
logger.info("update base image success") #vclustermgr.mount_allclusters() #logger.info("mount all cluster success") #vclustermgr.recover_allclusters() #logger.info("recover all cluster success") return [True, "update base image"] def get_image_info(self, user, imagename, imagetype): '''if imagetype == "private": imgpath = self.imgpath + "private/" + user + "/" else: imgpath = self.imgpath + "public/" + user + "/" image_info_file = open(imgpath+"."+image+".info",'r') time = image_info_file.readline() image_info_file.close() image_description_file = open(imgpath+"."+image+".description",'r') description = image_description_file.read() image_description_file.close()''' image = Image.query.filter_by(imagename=imagename,ownername=user).first() if image is None: return ["", ""] time = image.create_time.strftime("%Y-%m-%d %H:%M:%S") description = image.description if len(description) > 15: description = description[:15] + "......" return [time, description] def get_image_description(self, user, image): '''if image['type'] == "private": imgpath = self.imgpath + "private/" + user + "/" else: imgpath = self.imgpath + "public/" + image['owner'] + "/" image_description_file = open(imgpath+"."+image['name']+".description", 'r') description = image_description_file.read() image_description_file.close()''' image = Image.query.filter_by(imagename=image['name'],ownername=image['owner']).first() if image is None: return "" return image.description def get_image_size(self, image): imagename = image['name'] imagetype = image['type'] imageowner = image['owner'] if imagename == "base" and imagetype == "base": return 0 if imagetype == "private": imgpath = self.imgpath + "private/" + imageowner + "/" else: imgpath = self.imgpath + "public/" + imageowner + "/" return os.stat(os.path.join(imgpath, imagename+".tz")).st_size // (1024*1024) def format_size(self, size_in_byte): if size_in_byte < 1024: return str(size_in_byte) + "B" elif size_in_byte < 1024*1024: return str(size_in_byte//1024) + 
"KB" elif size_in_byte < 1024*1024*1024: return str(size_in_byte//(1024*1024)) + "MB" else: return str(size_in_byte//(1024*1024*1024)) + "GB" def list_images(self,user): images = {} images["private"] = [] images["public"] = {} imgpath = self.imgpath + "private/" + user + "/" try: Ret = sys_run("ls %s" % imgpath, True) private_images = str(Ret.stdout,"utf-8").split() for image in private_images: if not image[-3:] == '.tz': continue imagename = image[:-3] fimage={} fimage["name"] = imagename fimage["isshared"] = self.isshared(user,imagename) [time, description] = self.get_image_info(user, imagename, "private") fimage["time"] = time fimage["description"] = description fimage["size"] = os.stat(os.path.join(imgpath, image)).st_size fimage["size_format"] = self.format_size(fimage["size"]) fimage["size_in_mb"] = fimage["size"] // (1024*1024) images["private"].append(fimage) except Exception as e: logger.error(e) imgpath = self.imgpath + "public" + "/" try: Ret = sys_run("ls %s" % imgpath, True) public_users = str(Ret.stdout,"utf-8").split() for public_user in public_users: imgpath = self.imgpath + "public/" + public_user + "/" try: Ret = sys_run("ls %s" % imgpath, True) public_images = str(Ret.stdout,"utf-8").split() if len(public_images)==0: continue images["public"][public_user] = [] for image in public_images: if not image[-3:] == '.tz': continue imagename = image[:-3] fimage = {} fimage["name"] = imagename [time, description] = self.get_image_info(public_user, imagename, "public") fimage["time"] = time fimage["description"] = description fimage["size"] = os.stat(os.path.join(imgpath, image)).st_size fimage["size_format"] = self.format_size(fimage["size"]) fimage["size_in_mb"] = fimage["size"] // (1024*1024) images["public"][public_user].append(fimage) except Exception as e: logger.error(e) except Exception as e: logger.error(e) return images def isshared(self,user,imagename): '''imgpath = self.imgpath + "private/" + user + "/" image_info_file = 
open(imgpath+"."+image+".info",'r') [time, isshare] = image_info_file.readlines() image_info_file.close()''' image = Image.query.filter_by(imagename=imagename,ownername=user).first() if image is None: return "" if image.hasPublic == True: return "true" else: return "false" if __name__ == '__main__': mgr = ImageMgr() if sys.argv[1] == "prepareImage": mgr.prepareImage(sys.argv[2],sys.argv[3],sys.argv[4]) elif sys.argv[1] == "create": mgr.createImage(sys.argv[2],sys.argv[3],sys.argv[4]) else: logger.warning("unknown option") ================================================ FILE: src/utils/log.py ================================================ #!/usr/bin/env python import logging import logging.handlers import argparse import sys import time # this is only being used as part of the example import os from utils import env # logger should only be imported after initlogging has been called logger = None def initlogging(name='docklet'): # Deafults global logger homepath = env.getenv('FS_PREFIX') LOG_FILENAME = homepath + '/local/log/' + name + '.log' LOG_LIFE = env.getenv('LOG_LIFE') LOG_LEVEL = env.getenv('LOG_LEVEL') if LOG_LEVEL == "DEBUG": LOG_LEVEL = logging.DEBUG elif LOG_LEVEL == "INFO": LOG_LEVEL = logging.INFO elif LOG_LEVEL == "WARNING": LOG_LEVEL = logging.WARNING elif LOG_LEVEL == "ERROR": LOG_LEVEL = logging.ERROR elif LOG_LEVEL == "CRITICAL": LOG_LEVEL = logging.CRITIAL else: LOG_LEVEL = logging.DEBUG logger = logging.getLogger(name) # Configure logging to log to a file, making a new file at midnight and keeping the last 3 day's data # Give the logger a unique name (good practice) # Set the log level to LOG_LEVEL logger.setLevel(LOG_LEVEL) # Make a handler that writes to a file, making a new file at midnight and keeping 3 backups handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="midnight", backupCount=LOG_LIFE, encoding='utf-8') # Format each log message like this formatter = logging.Formatter('%(asctime)s %(levelname)-8s 
%(module)s[%(lineno)d] %(message)s') # Attach the formatter to the handler handler.setFormatter(formatter) # Attach the handler to the logger logger.addHandler(handler) # Replace stdout with logging to file at INFO level sys.stdout = RedirectLogger(logger, logging.INFO) # Replace stderr with logging to file at ERROR level sys.stderr = RedirectLogger(logger, logging.ERROR) # Make a class we can use to capture stdout and sterr in the log class RedirectLogger(object): def __init__(self, logger, level): """Needs a logger and a logger level.""" self.logger = logger self.level = level def write(self, message): # Only log if there is a message (not just a new line) if message.rstrip() != "": self.logger.log(self.level, message.rstrip()) def flush(self): for handler in self.logger.handlers: handler.flush() ================================================ FILE: src/utils/logs.py ================================================ #!/usr/bin/python3 from utils import env import json, os from utils.log import logger from werkzeug.utils import secure_filename logsPath = env.getenv('FS_PREFIX') + '/local/log/' class logsClass: setting = {} def list(*args, **kwargs): if ( ('user_group' in kwargs) == False): return {"success":'false', "reason":"Cannot get user_group"} user_group = kwargs['user_group'] if (not ((user_group == 'admin') or (user_group == 'root'))): return {"success": 'false', "reason": 'Unauthorized Action'} s = os.listdir(logsPath) r = [] for i in s: if ('log' in i): r.append(i) return {'success': 'true', 'result': r} def get(*args, **kwargs): if ( ('user_group' in kwargs) == False): return {"success":'false', "reason":"Cannot get user_group"} user_group = kwargs['user_group'] if (not ((user_group == 'admin') or (user_group == 'root'))): return {"success": 'false', "reason": 'Unauthorized Action'} filepath = logsPath + secure_filename(kwargs['filename']) try: if not os.path.exists(filepath): return {"success": 'false', "reason": 'file not exist'} logfile = 
open(filepath, 'r') logtext = logfile.read() logfile.close() return {'success': 'true', 'result': logtext} except: return {'success': 'false', 'reason': 'file read error'} logs = logsClass() ================================================ FILE: src/utils/lvmtool.py ================================================ #!/usr/bin/python3 import subprocess,os,time from utils.log import logger from utils import env def sys_run(command,check=False): Ret = subprocess.run(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True, check=check) return Ret def new_group(group_name, size = "5000", file_path = "/opt/docklet/local/docklet-storage"): storage = env.getenv("STORAGE") logger.info("begin initialize lvm group:%s with size %sM" % (group_name,size)) if storage == "file": #check vg Ret = sys_run("vgdisplay " + group_name) if Ret.returncode == 0: logger.info("lvm group: " + group_name + " already exists, delete it") Ret = sys_run("vgremove -f " + group_name) if Ret.returncode != 0: logger.error("delete VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8'))) #check pv Ret = sys_run("pvdisplay /dev/loop0") if Ret.returncode == 0: Ret = sys_run("pvremove -ff /dev/loop0") if Ret.returncode != 0: logger.error("remove pv failed:%s" % Ret.stdout.decode('utf-8')) #check mountpoint Ret = sys_run("losetup /dev/loop0") if Ret.returncode == 0: logger.info("/dev/loop0 already exists, detach it") Ret = sys_run("losetup -d /dev/loop0") if Ret.returncode != 0: logger.error("losetup -d failed:%s" % Ret.stdout.decode('utf-8')) #check file_path if os.path.exists(file_path): logger.info(file_path + " for lvm group already exists, delete it") os.remove(file_path) if not os.path.isdir(file_path[:file_path.rindex("/")]): os.makedirs(file_path[:file_path.rindex("/")]) try: sys_run("dd if=/dev/zero of=%s bs=1M seek=%s count=0" % (file_path,size)) sys_run("losetup /dev/loop0 " + file_path) sys_run("vgcreate %s /dev/loop0" % group_name) except Exception as e: logger.error(e) 
logger.info("initialize lvm group:%s with size %sM success" % (group_name,size)) return True elif storage == "disk": disk = env.getenv("DISK") if disk is None: logger.error("use disk for story without a physical disk") return False #check vg Ret = sys_run("vgdisplay " + group_name) if Ret.returncode == 0: logger.info("lvm group: " + group_name + " already exists, delete it") Ret = sys_run("vgremove -f " + group_name) if Ret.returncode != 0: logger.error("delete VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8'))) try: sys_run("vgcreate %s %s" % (group_name,disk)) except Exception as e: logger.error(e) logger.info("initialize lvm group:%s with size %sM success" % (group_name,size)) return True else: logger.info("unknown storage type:" + storage) return False def recover_group(group_name,file_path="/opt/docklet/local/docklet-storage"): storage = env.getenv("STORAGE") if storage == "file": if not os.path.exists(file_path): logger.error("%s not found, unable to recover VG" % file_path) return False #recover mountpoint Ret = sys_run("losetup /dev/loop0") if Ret.returncode != 0: Ret = sys_run("losetup /dev/loop0 " + file_path) if Ret.returncode != 0: logger.error("losetup failed:%s" % Ret.stdout.decode('utf-8')) return False time.sleep(1) #recover vg Ret = sys_run("vgdisplay " + group_name) if Ret.returncode != 0: Ret = sys_run("vgcreate %s /dev/loop0" % group_name) if Ret.returncode != 0: logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8'))) return False logger.info("recover VG %s success" % group_name) elif storage == "disk": disk = env.getenv("DISK") if disk is None: logger.error("use disk for story without a physical disk") return False #recover vg Ret = sys_run("vgdisplay " + group_name) if Ret.returncode != 0: Ret = sys_run("vgcreate %s %s" % (group_name,disk)) if Ret.returncode != 0: logger.error("create VG %s failed:%s" % (group_name,Ret.stdout.decode('utf-8'))) return False logger.info("recover VG %s success" % group_name) def 
new_volume(group_name,volume_name,size): Ret = sys_run("lvdisplay %s/%s" % (group_name,volume_name)) if Ret.returncode == 0: logger.info("logical volume already exists, delete it") Ret = sys_run("lvremove -f %s/%s" % (group_name,volume_name)) if Ret.returncode != 0: logger.error("delete logical volume %s failed: %s" % (volume_name, Ret.stdout.decode('utf-8'))) Ret = sys_run("lvcreate -L %sM -n %s %s" % (size,volume_name,group_name)) if Ret.returncode != 0: logger.error("lvcreate failed: %s" % Ret.stdout.decode('utf-8')) return False logger.info("create lv success") return True def check_group(group_name): Ret = sys_run("vgdisplay %s" % group_name) if Ret.returncode == 0: return True else: return False def check_volume(group_name,volume_name): Ret = sys_run("lvdisplay %s/%s" % (group_name,volume_name)) if Ret.returncode == 0: return True else: return False def delete_group(group_name): Ret = sys_run("vgdisplay %s" % group_name) if Ret.returncode == 0: Ret = sys_run("vgremove -f %s" % group_name) if Ret.returncode == 0: logger.info("delete vg %s success" % group_name) return True else: logger.error("delete vg %s failed:%s" % (group_name,Ret.stdout.decode('utf-8'))) return False else: logger.info("vg %s does not exists" % group_name) return True def delete_volume(group_name, volume_name): Ret = sys_run("lvdisplay %s/%s" % (group_name, volume_name)) if Ret.returncode == 0: Ret = sys_run("lvremove -f %s/%s" % (group_name, volume_name)) if Ret.returncode == 0: logger.info("delete lv %s in vg %s success" % (volume_name,group_name)) return True else: logger.error("delete lv %s in vg %s failed:%s" % (volume_name,group_name,Ret.stdout.decode('utf-8'))) return False else: logger.info("lv %s in vg %s does not exists" % (volume_name,group_name)) ================================================ FILE: src/utils/manage.py ================================================ import sys if sys.path[0].endswith("utils"): sys.path[0] = sys.path[0][:-5] from flask_migrate import 
Migrate,MigrateCommand from utils.model import * from flask_script import Manager from flask import Flask migrate = Migrate(app,db) manager = Manager(app) manager.add_command('db',MigrateCommand) if __name__ == '__main__': manager.run() ================================================ FILE: src/utils/model.py ================================================ #coding=utf-8 ''' 2 tables: users, usergroup User: id username password avatar nickname description status student_number department truename tel e_mail register_date user_group auth_method Usergroup id name Token expiration can be set in User.generate_auth_token ''' from flask import Flask from flask_sqlalchemy import SQLAlchemy from datetime import datetime from base64 import b64encode, b64decode import os, json #this class from itsdangerous implements token<->user from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from itsdangerous import SignatureExpired, BadSignature from utils import env fsdir = env.getenv('FS_PREFIX') app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+fsdir+'/global/sys/UserTable.db' app.config['SQLALCHEMY_BINDS'] = { 'history': 'sqlite:///'+fsdir+'/global/sys/HistoryTable.db', 'beansapplication': 'sqlite:///'+fsdir+'/global/sys/BeansApplication.db', 'system': 'sqlite:///'+fsdir+'/global/sys/System.db', 'batch':'sqlite:///'+fsdir+'/global/sys/Batch.db?check_same_thread=False', 'login': 'sqlite:///'+fsdir+'/global/sys/Login.db' } app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True try: secret_key_file = open(env.getenv('FS_PREFIX') + '/local/token_secret_key.txt') app.secret_key = secret_key_file.read() secret_key_file.close() except: from os import urandom secret_key = urandom(24) secret_key = b64encode(secret_key).decode('utf-8') app.secret_key = secret_key secret_key_file = open(env.getenv('FS_PREFIX') + '/local/token_secret_key.txt', 'w') secret_key_file.write(secret_key) secret_key_file.close() db = SQLAlchemy(app) class User(db.Model): id = 
db.Column(db.Integer, primary_key=True) username = db.Column(db.String(10), unique=True) password = db.Column(db.String(100)) avatar = db.Column(db.String(30)) nickname = db.Column(db.String(10)) description = db.Column(db.String(15)) status = db.Column(db.String(10)) e_mail = db.Column(db.String(20)) student_number = db.Column(db.String(20)) department = db.Column(db.String(20)) truename = db.Column(db.String(20)) tel = db.Column(db.String(20)) register_date = db.Column(db.String(10)) user_group = db.Column(db.String(50)) auth_method = db.Column(db.String(10)) beans = db.Column(db.Integer) def __init__(self, username, password, avatar="default.png", nickname = "", description = "", status = "init", e_mail = "" , student_number = "", department = "", truename = "", tel="", date = None, usergroup = "primary" , auth_method = "local"): # using sha512 #if (len(password) <= 6): # self = None # return None self.username = username self.password = password self.avatar = avatar self.nickname = nickname self.description = description self.status = status self.e_mail = e_mail self.student_number = student_number self.department = department self.truename = truename self.tel = tel self.beans = 150 if (date != None): self.register_date = date else: self.register_date = datetime.now() self.user_group = usergroup self.auth_method = auth_method def __repr__(self): return '' % (self.username) #token will expire after 3600s def generate_auth_token(self, expiration = 3600): s = Serializer(app.config['SECRET_KEY'], expires_in = expiration) str = s.dumps({'id': self.id}) return b64encode(str).decode('utf-8') @staticmethod def verify_auth_token(token): s = Serializer(app.config['SECRET_KEY']) try: data = s.loads(b64decode(token)) except SignatureExpired: return None # valid token, but expired except BadSignature: return None # invalid token user = User.query.get(data['id']) return user class UserGroup(db.Model): id = db.Column(db.Integer, primary_key=True) name = 
db.Column(db.String(50)) cpu = db.Column(db.String(10)) memory = db.Column(db.String(10)) imageQuantity = db.Column(db.String(10)) lifeCycle = db.Column(db.String(10)) def __init__(self, name): self.name = name self.cpu = '100000' self.memory = '2000' self.imageQuantity = '10' self.lifeCycle = '24' def __repr__(self): return '' % self.name class UserUsage(db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(50)) cpu = db.Column(db.String(10)) memory = db.Column(db.String(10)) disk = db.Column(db.String(10)) def __init__(self, name): self.username = name self.cpu = '0' self.memory = '0' self.disk = '0' def __repr__(self): return 'cpu:%s memory:%s disk:%s' % (self.username,self.cpu,self.memory,self.disk) class Notification(db.Model): id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(100)) content = db.Column(db.String(8000)) create_date = db.Column(db.String(10)) # Status: 'open' -> Open to user, 'closed' -> Closed to user status = db.Column(db.String(20)) def __init__(self, title, content=''): self.title = title self.content = content self.create_date = datetime.utcnow() self.status = 'open' def __repr__(self): return '' % self.title class NotificationGroups(db.Model): # __tablename__ = 'notification_groups' id = db.Column(db.Integer, primary_key=True) notification_id = db.Column(db.Integer) group_name = db.Column(db.String(100)) def __init__(self, notification_id, group_name): self.notification_id = notification_id self.group_name = group_name def __repr__(self): return '' % (self.notification_id, self.group_name) class UserNotificationPair(db.Model): id = db.Column(db.Integer, primary_key=True) userName = db.Column(db.String(10)) notifyId = db.Column(db.Integer) isRead = db.Column(db.Integer) def __init__(self, username, notifyid): self.userName = username self.notifyId = notifyid self.isRead = 0 def __repr__(self): return '' % (self.userName, self.notifyId) class LoginMsg(db.Model): __bind_key__ = 'login' 
id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(10)) userip = db.Column(db.String(20)) time = db.Column(db.DateTime) def __init__(self, username, userip): self.username = username self.userip = userip self.time = datetime.now() def __repr__(self): return '' % (self.id,self.username,self.userip,self.time.strftime("%Y-%m-%d %H:%M:%S")) class LoginFailMsg(db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(10), unique=True) failcnt = db.Column(db.Integer) bantime = db.Column(db.DateTime) def __init__(self, username): self.username = username self.failcnt = 0 self.bantime = datetime.now() def __repr__(self): return '' % (self.id,self.username,self.failcnt,self.bantime.strftime("%Y-%m-%d %H:%M:%S")) class VNode(db.Model): __bind_key__ = 'history' name = db.Column(db.String(100), primary_key=True) laststopcpuval = db.Column(db.Float) laststopruntime = db.Column(db.Integer) billing = db.Column(db.Integer) histories = db.relationship('History', backref='v_node', lazy='dynamic') def __init__(self, vnode_name): self.name = vnode_name self.laststopcpuval = 0 self.billing = 0 self.laststopruntime = 0 def __repr__(self): return '' % (self.name) class History(db.Model): __bind_key__ = 'history' id = db.Column(db.Integer, primary_key=True) vnode = db.Column(db.String(100), db.ForeignKey('v_node.name')) action = db.Column(db.String(30)) runningtime = db.Column(db.Integer) cputime = db.Column(db.Float) billing = db.Column(db.Integer) actionTime = db.Column(db.DateTime) def __init__(self, action, runningtime, cputime, billing): self.action = action self.runningtime = runningtime self.cputime = cputime self.billing = billing self.actionTime = datetime.now() def __repr__(self): return "{\"id\":\"%d\",\"vnode\":\"%s\",\"action\":\"%s\",\"runningtime\":\"%d\",\"cputime\":\"%f\",\"billing\":\"%d\",\"actionTime\":\"%s\"}" % (self.id, self.vnode, self.action, self.runningtime, self.cputime, self.billing, 
self.actionTime.strftime("%Y-%m-%d %H:%M:%S")) class ApplyMsg(db.Model): __bind_key__ = 'beansapplication' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(10)) number = db.Column(db.Integer) reason = db.Column(db.String(600)) status = db.Column(db.String(10)) time = db.Column(db.DateTime(10)) def __init__(self,username, number, reason): self.username = username self.number = number self.reason = reason self.status = "Processing" self.time = datetime.now() def ch2dict(self): ans = {} ans['id'] = self.id ans['username'] = self.username ans['number'] = self.number ans['reason'] = self.reason ans['status'] = self.status ans['time'] = self.time.strftime("%Y-%m-%d %H:%M:%S") return ans def __repr__(self): return "{\"id\":\"%d\", \"username\":\"%s\", \"number\": \"%d\", \"reason\":\"%s\", \"status\":\"%s\", \"time\":\"%s\"}" % (self.id, self.username, self.number, self.reason, self.status, self.time.strftime("%Y-%m-%d %H:%M:%S")) class Container(db.Model): __bind_key__ = 'system' containername = db.Column(db.String(100), primary_key=True) hostname = db.Column(db.String(30)) ip = db.Column(db.String(20)) host = db.Column(db.String(20)) image = db.Column(db.String(50)) lastsave = db.Column(db.DateTime) setting_cpu = db.Column(db.Integer) setting_mem = db.Column(db.Integer) setting_disk = db.Column(db.Integer) vclusterid = db.Column(db.Integer, db.ForeignKey('v_cluster.clusterid')) def __init__(self, containername, hostname, ip, host, image, lastsave, setting): self.containername = containername self.hostname = hostname self.ip = ip self.host = host self.image = image self.lastsave = lastsave self.setting_cpu = int(setting['cpu']) self.setting_mem = int(setting['memory']) self.setting_disk = int(setting['disk']) def __repr__(self): return "{\"containername\":\"%s\", \"hostname\":\"%s\", \"ip\": \"%s\", \"host\":\"%s\", \"image\":\"%s\", \"lastsave\":\"%s\", \"setting\":{\"cpu\":\"%d\",\"memory\":\"%d\",\"disk\":\"%d\"}}" % (self.containername, 
self.hostname, self.ip, self.host, self.image, self.lastsave.strftime("%Y-%m-%d %H:%M:%S"), self.setting_cpu, self.setting_mem, self.setting_disk) class PortMapping(db.Model): __bind_key__ = 'system' id = db.Column(db.Integer, primary_key=True, autoincrement=True) node_name = db.Column(db.String(100)) node_ip = db.Column(db.String(20)) node_port = db.Column(db.Integer) host_port= db.Column(db.Integer) vclusterid = db.Column(db.Integer, db.ForeignKey('v_cluster.clusterid')) def __init__(self, node_name, node_ip, node_port, host_port): self.node_name = node_name self.node_ip = node_ip self.node_port = int(node_port) self.host_port = int(host_port) def __repr__(self): return "{\"id\":\"%d\", \"node_name\":\"%s\", \"node_ip\": \"%s\", \"node_port\":\"%s\", \"host_port\":\"%s\"}" % (self.id, self.node_name, self.node_ip, self.node_port, self.host_port) class BillingHistory(db.Model): __bind_key__ = 'system' node_name = db.Column(db.String(100), primary_key=True) vclusterid = db.Column(db.Integer, db.ForeignKey('v_cluster.clusterid')) cpu = db.Column(db.Float) mem = db.Column(db.Float) disk = db.Column(db.Float) port = db.Column(db.Float) def __init__(self,node_name,cpu,mem,disk,port): self.node_name = node_name self.cpu = cpu self.mem = mem self.disk = disk self.port = port def __repr__(self): return "{\"node_name\":\"%s\", \"cpu\": %f, \"mem\": %f, \"disk\": %f, \"port\": %f}" % (self.node_name, self.cpu, self.mem, self.disk, self.port) class VCluster(db.Model): __bind_key__ = 'system' clusterid = db.Column(db.BigInteger, primary_key=True, autoincrement=False) clustername = db.Column(db.String(50)) ownername = db.Column(db.String(20)) status = db.Column(db.String(10)) size = db.Column(db.Integer) containers = db.relationship('Container', backref='v_cluster', lazy='dynamic') nextcid = db.Column(db.Integer) create_time = db.Column(db.DateTime) start_time = db.Column(db.String(20)) stop_time = db.Column(db.DateTime) is_warned = db.Column(db.Boolean) proxy_server_ip = 
db.Column(db.String(20)) proxy_public_ip = db.Column(db.String(20)) port_mapping = db.relationship('PortMapping', backref='v_cluster', lazy='dynamic') billing_history = db.relationship('BillingHistory', backref='v_cluster', lazy='dynamic') def __init__(self, clusterid, clustername, ownername, status, size, nextcid, proxy_server_ip, proxy_public_ip): self.clusterid = clusterid self.clustername = clustername self.ownername = ownername self.status = status self.size = size self.nextcid = nextcid self.proxy_server_ip = proxy_server_ip self.proxy_public_ip = proxy_public_ip self.containers = [] self.port_mapping = [] self.billing_history = [] self.create_time = datetime.now() self.start_time = "------" self.stop_time = datetime.now() self.is_warned = False def __repr__(self): info = {} info["clusterid"] = self.clusterid info["clustername"] = self.clustername info["ownername"] = self.ownername info["status"] = self.status info["size"] = self.size info["proxy_server_ip"] = self.proxy_server_ip info["proxy_public_ip"] = self.proxy_public_ip info["nextcid"] = self.nextcid info["create_time"] = self.create_time.strftime("%Y-%m-%d %H:%M:%S") info["start_time"] = self.start_time if self.stop_time is None: info['stop_time'] = "------" else: info['stop_time'] = self.stop_time.strftime("%Y-%m-%d %H:%M:%S") info["is_warned"] = self.is_warned info["containers"] = [dict(eval(str(con))) for con in self.containers] info["port_mapping"] = [dict(eval(str(pm))) for pm in self.port_mapping] info["billing_history"] = [dict(eval(str(bh))) for bh in self.billing_history] #return "{\"clusterid\":\"%d\", \"clustername\":\"%s\", \"ownername\": \"%s\", \"status\":\"%s\", \"size\":\"%d\", \"proxy_server_ip\":\"%s\", \"create_time\":\"%s\"}" % (self.clusterid, self.clustername, self.ownername, self.status, self.size, self.proxy_server_ip, self.create_time.strftime("%Y-%m-%d %H:%M:%S")) return json.dumps(info) class Image(db.Model): __bind_key__ = 'system' imagename = db.Column(db.String(50)) id = 
db.Column(db.Integer, primary_key=True) hasPrivate = db.Column(db.Boolean) hasPublic = db.Column(db.Boolean) ownername = db.Column(db.String(20)) create_time = db.Column(db.DateTime) description = db.Column(db.Text) def __init__(self,imagename,hasPrivate,hasPublic,ownername,description): self.imagename = imagename self.hasPrivate = hasPrivate self.hasPublic = hasPublic self.ownername = ownername self.description = description self.create_time = datetime.now() def __repr__(self): return "{\"id\":\"%d\",\"imagename\":\"%s\",\"hasPrivate\":\"%s\",\"hasPublic\":\"%s\",\"ownername\":\"%s\",\"updatetime\":\"%s\",\"description\":\"%s\"}" % (self.id,self.imagename,str(self.hasPrivate),str(self.hasPublic),self.create_time.strftime("%Y-%m-%d %H:%M:%S"),self.ownername,self.description) class Batchjob(db.Model): __bind_key__ = 'batch' id = db.Column(db.String(9), primary_key=True) username = db.Column(db.String(10)) name = db.Column(db.String(30)) priority = db.Column(db.Integer) status = db.Column(db.String(10)) failed_reason = db.Column(db.Text) create_time = db.Column(db.DateTime) end_time = db.Column(db.DateTime) billing = db.Column(db.Integer) tasks = db.relationship('Batchtask', backref='batchjob', lazy='dynamic') def __init__(self,id,username,name,priority): self.id = id self.username = username self.name = name self.priority = priority self.status = "pending" self.failed_reason = "" self.create_time = datetime.now() self.end_time = None self.billing = 0 def clear(self): self.status = "pending" self.failed_reason = "" self.end_time = None self.billing = 0 def __repr__(self): info = {} info['job_id'] = self.id info['username'] = self.username info['job_name'] = self.name info['priority'] = self.priority info['status'] = self.status info['failed_reason'] = self.failed_reason info['create_time'] = self.create_time.strftime("%Y-%m-%d %H:%M:%S") if self.end_time is None: info['end_time'] = "------" else: info['end_time'] = self.end_time.strftime("%Y-%m-%d %H:%M:%S") 
info['billing'] = self.billing return json.dumps(info) class Batchtask(db.Model): __bind_key__ = 'batch' id = db.Column(db.String(15), primary_key=True) idx = db.Column(db.String(10)) jobid = db.Column(db.String(9), db.ForeignKey('batchjob.id')) status = db.Column(db.String(15)) failed_reason = db.Column(db.Text) start_time = db.Column(db.DateTime) end_time = db.Column(db.DateTime) running_time = db.Column(db.Integer) billing = db.Column(db.Integer) config = db.Column(db.Text) tried_times = db.Column(db.Integer) def __init__(self, id, idx, config): self.id = id self.idx = idx self.status = "pending" self.failed_reason = "" self.start_time = None self.end_time = None self.running_time = 0 self.billing = 0 self.config = json.dumps(config) self.tried_times = 0 def clear(self): self.status = "pending" self.failed_reason = "" self.start_time = None self.end_time = None self.running_time = 0 self.billing = 0 self.tried_times = 0 def __repr__(self): info = {} info['id'] = self.id info['idx'] = self.idx info['jobid'] = self.jobid info['status'] = self.status info['failed_reason'] = self.failed_reason if self.start_time is None: info['start_time'] = "------" else: info['start_time'] = self.start_time.strftime("%Y-%m-%d %H:%M:%S") if self.end_time is None: info['end_time'] = "------" else: info['end_time'] = self.end_time.strftime("%Y-%m-%d %H:%M:%S") info['running_time'] = self.running_time info['billing'] = self.billing info['config'] = json.loads(self.config) info['tried_times'] = self.tried_times return json.dumps(info) ================================================ FILE: src/utils/nettools.py ================================================ #!/usr/bin/python3 import subprocess, threading from utils.log import logger from utils import env class ipcontrol(object): @staticmethod def parse(cmdout): links = {} thislink = None for line in cmdout.splitlines(): # empty line if len(line)==0: continue # Level 1 : first line of one link if line[0] != ' ': blocks = line.split() 
thislink = blocks[1].strip(':') links[thislink] = {} links[thislink]['state'] = blocks[blocks.index('state')+1] if 'state' in blocks else 'UNKNOWN' # Level 2 : line with 4 spaces elif line[4] != ' ': blocks = line.split() if blocks[0] == 'inet': if 'inet' not in links[thislink]: links[thislink]['inet'] = [] links[thislink]['inet'].append(blocks[1]) # we just need inet (IPv4) else: pass # Level 3 or more : no need for us else: pass return links @staticmethod def list_links(): try: ret = subprocess.run(['ip', 'link', 'show'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) links = ipcontrol.parse(ret.stdout.decode('utf-8')) return [True, list(links.keys())] except subprocess.CalledProcessError as suberror: return [False, "list links failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def link_exist(linkname): try: subprocess.run(['ip', 'link', 'show', 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return True except subprocess.CalledProcessError: return False @staticmethod def link_info(linkname): try: ret = subprocess.run(['ip', 'address', 'show', 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, ipcontrol.parse(ret.stdout.decode('utf-8'))[str(linkname)]] except subprocess.CalledProcessError as suberror: return [False, "get link info failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def link_state(linkname): try: ret = subprocess.run(['ip', 'link', 'show', 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, ipcontrol.parse(ret.stdout.decode('utf-8'))[str(linkname)]['state']] except subprocess.CalledProcessError as suberror: return [False, "get link state failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def link_ips(linkname): [status, info] = ipcontrol.link_info(str(linkname)) if status: if 'inet' not in info: return [True, []] else: 
return [True, info['inet']] else: return [False, info] @staticmethod def up_link(linkname): try: subprocess.run(['ip', 'link', 'set', 'dev', str(linkname), 'up'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(linkname)] except subprocess.CalledProcessError as suberror: return [False, "set link up failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def down_link(linkname): try: subprocess.run(['ip', 'link', 'set', 'dev', str(linkname), 'down'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(linkname)] except subprocess.CalledProcessError as suberror: return [False, "set link down failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def add_addr(linkname, address): try: subprocess.run(['ip', 'address', 'add', address, 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(linkname)] except subprocess.CalledProcessError as suberror: return [False, "add address failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def del_addr(linkname, address): try: subprocess.run(['ip', 'address', 'del', address, 'dev', str(linkname)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(linkname)] except subprocess.CalledProcessError as suberror: return [False, "delete address failed : %s" % suberror.stdout.decode('utf-8')] # ovs-vsctl list-br # ovs-vsctl br-exists # ovs-vsctl add-br # ovs-vsctl del-br # ovs-vsctl list-ports # ovs-vsctl del-port # ovs-vsctl add-port -- set interface type=gre options:remote_ip= # ovs-vsctl add-port tag= -- set interface type=internal # ovs-vsctl port-to-br # ovs-vsctl set Port tag= # ovs-vsctl clear Port tag class ovscontrol(object): @staticmethod def list_bridges(): try: ret = subprocess.run(['ovs-vsctl', 'list-br'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, 
ret.stdout.decode('utf-8').split()] except subprocess.CalledProcessError as suberror: return [False, "list bridges failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def bridge_exist(bridge): try: subprocess.run(['ovs-vsctl', 'br-exists', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return True except subprocess.CalledProcessError: return False @staticmethod def port_tobridge(port): try: ret = subprocess.run(['ovs-vsctl', 'port-to-br', str(port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, ret.stdout.decode('utf-8').strip()] except subprocess.CalledProcessError as suberror: return [False, suberror.stdout.decode('utf-8')] @staticmethod def port_exists(port): return ovscontrol.port_tobridge(port)[0] @staticmethod def add_bridge(bridge): try: subprocess.run(['ovs-vsctl', '--may-exist', 'add-br', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(bridge)] except subprocess.CalledProcessError as suberror: return [False, "add bridge failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def del_bridge(bridge): try: subprocess.run(['ovs-vsctl', 'del-br', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(bridge)] except subprocess.CalledProcessError as suberror: return [False, "del bridge failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def list_ports(bridge): try: ret = subprocess.run(['ovs-vsctl', 'list-ports', str(bridge)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, ret.stdout.decode('utf-8').split()] except subprocess.CalledProcessError as suberror: return [False, "list ports failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def del_port(bridge, port): try: subprocess.run(['ovs-vsctl', 'del-port', str(bridge), str(port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, 
shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "delete port failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def add_port(bridge, port): try: subprocess.run(['ovs-vsctl', '--may-exist', 'add-port', str(bridge), str(port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def add_port_internal(bridge, port): try: subprocess.run(['ovs-vsctl', '--may-exist', 'add-port', str(bridge), str(port), '--', 'set', 'interface', str(port), 'type=internal'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def add_port_internal_withtag(bridge, port, tag): try: subprocess.run(['ovs-vsctl', 'add-port', str(bridge), str(port), 'tag='+str(tag), '--', 'set', 'interface', str(port), 'type=internal'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def add_port_gre(bridge, port, remote): try: subprocess.run(['ovs-vsctl', '--may-exist', 'add-port', str(bridge), str(port), '--', 'set', 'interface', str(port), 'type=gre', 'options:remote_ip='+str(remote)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def add_port_gre_withkey(bridge, port, remote, key): try: subprocess.run(['ovs-vsctl', '--may-exist', 'add-port', str(bridge), str(port), '--', 'set', 'interface', 
str(port), 'type=gre', 'options:remote_ip='+str(remote), 'options:key='+str(key)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "add port failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def set_port_tag(port, tag): try: subprocess.run(['ovs-vsctl', 'set', 'Port', str(port), 'tag='+str(tag)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "set port tag failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def set_port_input_qos(port, input_rate_limit): input_rate_limiting = int(input_rate_limit)*1000 if input_rate_limiting == 0: return [True, str(port)] try: p = subprocess.run(['ovs-vsctl', 'create', 'qos', 'type=linux-htb', 'other_config:max-rate='+str(input_rate_limiting)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) subprocess.run(['ovs-vsctl', 'set', 'Port', str(port), 'qos='+p.stdout.decode('utf-8').rstrip()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "set port input qos failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def del_port_input_qos(port): try: p = subprocess.run(['ovs-vsctl', 'get', 'port', str(port), 'qos'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) subprocess.run(['ovs-vsctl', 'clear', 'port', str(port), 'qos'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) subprocess.run(['ovs-vsctl', 'destroy', 'qos', p.stdout.decode('utf-8').rstrip()], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "del port input qos failed : %s" % suberror.stdout.decode('utf-8')] 
@staticmethod def set_port_output_qos(port, output_rate_limit): try: subprocess.run(['ovs-vsctl', 'set', 'interface', str(port), 'ingress_policing_rate='+str(output_rate_limit)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) subprocess.run(['ovs-vsctl', 'set', 'interface', str(port), 'ingress_policing_burst='+str(output_rate_limit)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "set port output qos failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def del_port_output_qos(port): try: subprocess.run(['ovs-vsctl', 'set', 'interface', str(port), 'ingress_policing_rate=0'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) subprocess.run(['ovs-vsctl', 'set', 'interface', str(port), 'ingress_policing_burst=0'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(port)] except subprocess.CalledProcessError as suberror: return [False, "del port output qos failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def destroy_all_qos(): try: ret = subprocess.run(['ovs-vsctl', '--all', 'destroy', 'qos'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, 'succeed to destroying all qos.'] except subprocess.CalledProcessError as suberror: return [False, "destroy all qos failed : %s" % suberror.stdout.decode('utf-8')] class netcontrol(object): @staticmethod def bridge_exists(bridge): return ovscontrol.bridge_exist(bridge) @staticmethod def del_bridge(bridge): return ovscontrol.del_bridge(bridge) @staticmethod def new_bridge(bridge): return ovscontrol.add_bridge(bridge) @staticmethod def gre_exists(bridge, remote): # port is unique, bridge is not necessary return ovscontrol.port_exists('gre-'+str(remote)) @staticmethod def setup_gre(bridge, remote): return ovscontrol.add_port_gre(bridge, 'gre-'+str(remote), remote) 
@staticmethod def gw_exists(bridge, gwport): return ovscontrol.port_exists(gwport) @staticmethod def setup_gw(bridge, gwport, addr, input_rate_limit, output_rate_limit): [status, result] = ovscontrol.add_port_internal(bridge, gwport) if not status: return [status, result] [status, result] = ipcontrol.add_addr(gwport, addr) if not status: return [status, result] [status, result] = ipcontrol.up_link(gwport) if not status: return [status, result] [status, result] = ovscontrol.set_port_input_qos(gwport, input_rate_limit) if not status: return [status, result] return ovscontrol.set_port_output_qos(gwport, output_rate_limit) @staticmethod def del_gw(bridge, gwport): [status, result] = ovscontrol.del_port_input_qos(gwport) if not status: return [status, result] [status, result] = ovscontrol.del_port_output_qos(gwport) if not status: return [status, result] return ovscontrol.del_port(bridge, gwport) @staticmethod def check_gw(bridge, gwport, uid, addr, input_rate_limit, output_rate_limit): ovscontrol.add_bridge(bridge) if not netcontrol.gw_exists(bridge, gwport): return netcontrol.setup_gw(bridge, gwport, addr, input_rate_limit, output_rate_limit) [status, info] = ipcontrol.link_info(gwport) if not status: return [False, "get gateway info failed"] if ('inet' not in info) or (addr not in info['inet']): ipcontrol.add_addr(gwport, addr) else: info['inet'].remove(addr) for otheraddr in info['inet']: ipcontrol.del_addr(gwport, otheraddr) if info['state'] == 'DOWN': ipcontrol.up_link(gwport) return [True, "check gateway port %s" % gwport] @staticmethod def recover_usernet(portname, uid, GatewayHost, isGatewayHost): ovscontrol.add_bridge("docklet-br-"+str(uid)) if not isGatewayHost: [success, ports] = ovscontrol.list_ports("docklet-br-"+str(uid)) if success: for port in ports: if port.startswith("gre") and (not port == ("gre-"+str(uid)+"-"+GatewayHost) ) : ovscontrol.del_port("docklet-br-"+str(uid),port) ovscontrol.add_port_gre_withkey("docklet-br-"+str(uid), 
"gre-"+str(uid)+"-"+GatewayHost, GatewayHost, str(uid)) ovscontrol.add_port("docklet-br-"+str(uid), portname) free_ports = [False]*65536 allocated_ports = {} ports_lock = threading.Lock() class portcontrol(object): @staticmethod def init_new(): Free_Ports_str = env.getenv("ALLOCATED_PORTS") global free_ports #logger.info(Free_Ports_str) portsranges=Free_Ports_str.split(',') #logger.info(postranges) for portsrange in portsranges: portsrange=portsrange.strip().split('-') start = int(portsrange[0]) end = int(portsrange[1]) if end < start or end > 65535 or start < 1: return [False, "Illegal port ranges."] i = start #logger.info(str(start)+" "+str(end)) while i <= end: free_ports[i] = True i += 1 #logger.info(free_ports[10001]) return [True,""] @staticmethod def init_recovery(Free_Ports_str): Free_Ports_str = env.getenv("ALLOCATED_PORTS") return [True,""] @staticmethod def acquire_port_mapping(container_name, container_ip, container_port, host_port=None): global free_ports global allocated_ports global ports_lock ports_lock.acquire() # if container_name in allocated_ports.keys(): # return [False, "This container already has a port mapping."] if container_name not in allocated_ports.keys(): allocated_ports[container_name] = {} elif container_port in allocated_ports[container_name].keys(): ports_lock.release() return [False, "This container port already has a port mapping."] if container_name == "" or container_ip == "" or container_port == "": ports_lock.release() return [False, "Node Name or Node IP or Node Port can't be null."] #print("acquire_port_mapping1") free_port = 1 if host_port is not None: # recover from host_port free_port = int(host_port) else: # acquire new free port while free_port <= 65535: if free_ports[free_port]: break free_port += 1 if free_port == 65536: ports_lock.release() return [False, "No free ports."] free_ports[free_port] = False allocated_ports[container_name][container_port] = free_port public_ip = env.getenv("PUBLIC_IP") 
ports_lock.release() try: subprocess.run(['iptables','-t','nat','-A','PREROUTING','-p','tcp','--dport',str(free_port),"-j","DNAT",'--to-destination','%s:%s'%(container_ip,container_port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) except subprocess.CalledProcessError as suberror: return [False, "set port mapping failed : %s" % suberror.stdout.decode('utf-8')] try: subprocess.run(['iptables','-t','nat','-A','PREROUTING','-p','udp','--dport',str(free_port),"-j","DNAT",'--to-destination','%s:%s'%(container_ip,container_port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) return [True, str(free_port)] except subprocess.CalledProcessError as suberror: return [False, "set port mapping failed : %s" % suberror.stdout.decode('utf-8')] @staticmethod def release_port_mapping(container_name, container_ip, container_port): global free_ports global allocated_ports global ports_lock if container_name not in allocated_ports.keys(): return [False, "This container does not have a port mapping."] free_port = allocated_ports[container_name][container_port] public_ip = env.getenv("PUBLIC_IP") try: subprocess.run(['iptables','-t','nat','-D','PREROUTING','-p','tcp','--dport',str(free_port),"-j","DNAT",'--to-destination','%s:%s'%(container_ip,container_port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) except subprocess.CalledProcessError as suberror: return [False, "release port mapping failed : %s" % suberror.stdout.decode('utf-8')] try: subprocess.run(['iptables','-t','nat','-D','PREROUTING','-p','udp','--dport',str(free_port),"-j","DNAT",'--to-destination','%s:%s'%(container_ip,container_port)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, check=True) except subprocess.CalledProcessError as suberror: return [False, "release port mapping failed : %s" % suberror.stdout.decode('utf-8')] ports_lock.acquire() free_ports[free_port] = True 
allocated_ports[container_name].pop(container_port) ports_lock.release() return [True, ""] ================================================ FILE: src/utils/proxytool.py ================================================ #!/usr/bin/python3 import requests, json from utils import env proxy_api_port = env.getenv("PROXY_API_PORT") proxy_control="http://localhost:"+ str(proxy_api_port) +"/api/routes" def get_routes(): try: resp = requests.get(proxy_control) except: return [False, 'Connect Failed'] return [True, resp.json()] def set_route(path, target): path='/'+path.strip('/') if path=='' or target=='': return [False, 'input not valid'] try: resp = requests.post(proxy_control+path, data=json.dumps({'target':target})) except: return [False, 'Connect Failed'] return [True, 'set ok'] def delete_route(path): path='/'+path.strip('/') try: resp = requests.delete(proxy_control+path) except: return [False, 'Connect Failed'] # if exist and delete, status_code=204, if not exist, status_code=404 return [True, 'delete ok'] ================================================ FILE: src/utils/tools.py ================================================ #!/usr/bin/python3 import os, random #from log import logger def loadenv(configpath): configfile = open(configpath) #logger.info ("load environment from %s" % configpath) for line in configfile: line = line.strip() if line == '': continue keyvalue = line.split("=") if len(keyvalue) < 2: continue key = keyvalue[0].strip() value = keyvalue[1].strip() #logger.info ("load env and put env %s:%s" % (key, value)) os.environ[key] = value def gen_token(): return str(random.randint(10000, 99999))+"-"+str(random.randint(10000, 99999)) ================================================ FILE: src/utils/updatebase.py ================================================ #!/usr/bin/python3 import os, shutil from utils.log import logger def aufs_remove(basefs): try: if os.path.isdir(basefs): shutil.rmtree(basefs) elif os.path.isfile(basefs): os.remove(basefs) except 
Exception as e: logger.error(e) def aufs_clean(basefs): # clean the aufs mark allfiles = os.listdir(basefs) for onefile in allfiles: if onefile[:4] == ".wh.": aufs_remove(basefs + "/" + onefile) def aufs_merge(image, basefs): allfiles = os.listdir(image) if ".wh..wh..opq" in allfiles: #this is a new dir in image, remove the dir in basefs with the same name, and copy it to basefs shutil.rmtree(basefs) shutil.copytree(image, basefs, symlinks=True) aufs_clean(basefs) return for onefile in allfiles: try: if onefile[:7] == ".wh..wh": # aufs mark, but not white-out mark, ignore it continue elif onefile[:4] == ".wh.": # white-out mark, remove the file in basefs aufs_remove(basefs + "/" + onefile[4:]) elif os.path.isdir(image + "/" + onefile): if os.path.isdir(basefs + "/" + onefile): # this is a dir in image and basefs, merge it aufs_merge(image + "/" + onefile, basefs + "/" + onefile) elif os.path.isfile(basefs + "/" + onefile): # this is a dir in image but file in basefs, remove the file and copy the dir to basefs os.remove(basefs + "/" + onefile) shutil.copytree(image + "/" + onefile, basefs + "/" + onefile, symlinks=True) elif not os.path.exists(basefs + "/" + onefile): # this is a dir in image but not exists in basefs, copy the dir to basefs shutil.copytree(image + "/" + onefile, basefs + "/" + onefile, symlinks=True) else: # error logger.error(basefs + "/" + onefile + " cause error") elif os.path.isfile(image + "/" + onefile): if os.path.isdir(basefs + "/" + onefile): # this is a file in image but dir in basefs, remove the dir and copy the file to basefs shutil.rmtree(basefs + "/" + onefile) shutil.copy2(image+ "/" + onefile, basefs + "/" + onefile, follow_symlinks=False) elif os.path.isfile(basefs + "/" + onefile): # this is a file in image and basefs, remove the file and copy the file to basefs os.remove(basefs + "/" + onefile) shutil.copy2(image+ "/" + onefile, basefs + "/" + onefile, follow_symlinks=False) elif not os.path.isdir(basefs + "/" + onefile): # this 
is a file in image but not exists in basefs, copy the file to basefs shutil.copy2(image+ "/" + onefile, basefs + "/" + onefile, follow_symlinks=False) else: # error logger.error(basefs + "/" + onefile + " cause error") except Exception as e: logger.error(e) def aufs_update_base(image, basefs): if not os.path.isdir(basefs): logger.error("basefs:%s doesn't exists" % basefs) if not os.path.isdir(image): logger.error("image:%s doesn't exists" % image) aufs_merge(image, basefs) ================================================ FILE: src/worker/container.py ================================================ #!/usr/bin/python3 import subprocess, os, json, traceback from utils.log import logger from utils import env, imagemgr from utils.lvmtool import sys_run, check_volume from worker.monitor import Container_Collector, History_Manager import lxc from utils import model class Container(object): def __init__(self, addr, etcdclient): self.addr = addr self.etcd=etcdclient self.libpath = env.getenv('DOCKLET_LIB') self.confpath = env.getenv('DOCKLET_CONF') self.fspath = env.getenv('FS_PREFIX') # set jupyter running dir in container self.rundir = "/home/jupyter" # set root running dir in container self.nodehome = "/root" self.lxcpath = "/var/lib/lxc" self.imgmgr = imagemgr.ImageMgr() self.historymgr = History_Manager() def prepare_hook_conf(self, conf_path, env_dict): try: confile = open(conf_path, "w") for k,v in env_dict.items(): confile.write("%s=%s\n"%(k,v)) confile.close() except Exception as e: logger.error(traceback.format_exc()) return [False, e] return [True, ""] def create_container(self, lxc_name, proxy_server_ip, username, uid, setting, clustername, clusterid, containerid, hostname, ip, gateway, image): logger.info("create container %s of %s for %s" %(lxc_name, clustername, username)) try: setting = json.loads(setting) cpu = int(setting['cpu']) * 100000 memory = setting["memory"] disk = setting["disk"] image = json.loads(image) status = 
self.imgmgr.prepareFS(username,image,lxc_name,disk) if not status: return [False, "Create container failed when preparing filesystem, possibly insufficient space"] #Ret = subprocess.run([self.libpath+"/lxc_control.sh", # "create", lxc_name, username, str(clusterid), hostname, # ip, gateway, str(cpu), str(memory)], stdout=subprocess.PIPE, # stderr=subprocess.STDOUT,shell=False, check=True) rootfs = "/var/lib/lxc/%s/rootfs" % lxc_name if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)): path = env.getenv('DOCKLET_LIB') subprocess.call([path+"/master/userinit.sh", username]) logger.info("user %s directory not found, create it" % username) sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name) logger.info("generate config file for %s" % lxc_name) def config_prepare(content): content = content.replace("%ROOTFS%",rootfs) content = content.replace("%HOSTNAME%",hostname) content = content.replace("%IP%",ip) content = content.replace("%GATEWAY%",gateway) content = content.replace("%CONTAINER_MEMORY%",str(memory)) content = content.replace("%CONTAINER_CPU%",str(cpu)) content = content.replace("%FS_PREFIX%",self.fspath) content = content.replace("%USERNAME%",username) content = content.replace("%CLUSTERID%",str(clusterid)) content = content.replace("%LXCSCRIPT%",env.getenv("LXC_SCRIPT")) content = content.replace("%LXCNAME%",lxc_name) content = content.replace("%UserID%",str(uid)) content = content.replace("%CLUSTERNAME%", clustername) content = content.replace("%VETHPAIR%", str(clusterid)+'-'+str(containerid)) return content conffile = open(self.confpath+"/container.conf", 'r') conftext = conffile.read() conffile.close() conftext = config_prepare(conftext) conffile = open("/var/lib/lxc/%s/config" % lxc_name,"w") conffile.write(conftext) conffile.close() if os.path.isfile(self.confpath+"/lxc.custom.conf"): conffile = open(self.confpath+"/lxc.custom.conf", 'r') conftext = conffile.read() conffile.close() conftext = config_prepare(conftext) conffile = 
open("/var/lib/lxc/%s/config" % lxc_name, 'a') conffile.write(conftext) conffile.close() hook_env = {} hook_env['Bridge'] = "docklet-br-%d" % uid hook_env['HNAME'] = hostname self.prepare_hook_conf(rootfs+"/../env.conf",hook_env) #logger.debug(Ret.stdout.decode('utf-8')) logger.info("create container %s success" % lxc_name) # get AUTH COOKIE URL for jupyter [status, authurl] = self.etcd.getkey("web/authurl") if not status: [status, masterip] = self.etcd.getkey("service/master") if status: webport = env.getenv("WEB_PORT") authurl = "http://%s:%s/jupyter" % (masterip, webport) else: logger.error ("get AUTH COOKIE URL failed for jupyter") authurl = "error" cookiename='docklet-jupyter-cookie' rundir = self.lxcpath+'/'+lxc_name+'/rootfs' + self.rundir logger.debug(rundir) if not os.path.exists(rundir): os.makedirs(rundir) else: if not os.path.isdir(rundir): os.remove(rundir) os.makedirs(rundir) jconfigpath = rundir + '/jupyter.config' config = open(jconfigpath, 'w') jconfigs="""USER=%s PORT=%d COOKIE_NAME=%s BASE_URL=%s HUB_PREFIX=%s HUB_API_URL=%s IP=%s """ % (username, 10000, cookiename, '/'+ proxy_server_ip +'/go/'+username+'/'+clustername, '/jupyter', authurl, ip.split('/')[0]) config.write(jconfigs) config.close() except subprocess.CalledProcessError as sube: logger.error('create container %s failed: %s' % (lxc_name, sube.stdout.decode('utf-8'))) return [False, "create container failed"] except Exception as e: logger.error(e) return [False, "create container failed"] self.historymgr.log(lxc_name,"Create") return [True, "create container success"] def delete_container(self, lxc_name): logger.info ("delete container:%s" % lxc_name) if self.imgmgr.deleteFS(lxc_name): Container_Collector.billing_increment(lxc_name) self.historymgr.log(lxc_name,"Delete") logger.info("delete container %s success" % lxc_name) return [True, "delete container success"] else: logger.info("delete container %s failed" % lxc_name) return [False, "delete container failed"] #status = 
subprocess.call([self.libpath+"/lxc_control.sh", "delete", lxc_name]) #if int(status) == 1: # logger.error("delete container %s failed" % lxc_name) # return [False, "delete container failed"] #else: # logger.info ("delete container %s success" % lxc_name) # return [True, "delete container success"] # start container, if running, restart it def start_container(self, lxc_name): logger.info ("start container:%s" % lxc_name) c = lxc.Container(lxc_name) if not c.start(): logger.error('start container %s failed' % lxc_name) return [False, "start container failed"] else: logger.info ("start container %s success" % lxc_name) self.historymgr.log(lxc_name,"Start") return [True, "start container success"] # start container services # for the master node, jupyter must be started, # for other node, ssh must be started. # container must be RUNNING before calling this service def start_services(self, lxc_name, services=[]): logger.info ("start services for container %s: %s" % (lxc_name, services)) c = lxc.Container(lxc_name) Ret = c.attach_wait(lxc.attach_run_command,["service","ssh","start"]) if Ret == 0: if len(services) == 0: # master node Ret = c.attach_wait(lxc.attach_run_command,["su","-c","%s/start_jupyter.sh" % self.rundir]) if Ret == 0: logger.info("start ssh and jupyter notebook services for container %s success" % lxc_name) return [True, "start container services success"] else: logger.error('start services for container %s failed:jupyter' % lxc_name) return [False, "start services for container failed:jupyter"] else: logger.info("start ssh service for container %s success" % lxc_name) return [True, "start container services success"] logger.error('start services for container %s failed:ssh' % lxc_name) return [False, "start services for container failed:ssh"] # mount_container: mount base image and user image by aufs def mount_container(self,lxc_name): logger.info ("mount container:%s" % lxc_name) [success, status] = self.container_status(lxc_name) if not success: 
return [False, status] self.imgmgr.checkFS(lxc_name) return [True, "mount success"] # recover container: if running, do nothing. if stopped, start it def recover_container(self, lxc_name): logger.info ("recover container:%s" % lxc_name) #status = subprocess.call([self.libpath+"/lxc_control.sh", "status", lxc_name]) [success, status] = self.container_status(lxc_name) if not success: return [False, status] self.imgmgr.checkFS(lxc_name) if status == 'stopped': logger.info("%s stopped, recover it to running" % lxc_name) if self.start_container(lxc_name)[0]: self.historymgr.log(lxc_name,"Recover") if self.start_services(lxc_name)[0]: logger.info("%s recover success" % lxc_name) return [True, "recover success"] else: logger.error("%s recover failed with services not start" % lxc_name) return [False, "recover failed for services not start"] else: logger.error("%s recover failed for container starting failed" % lxc_name) return [False, "recover failed for container starting failed"] else: logger.info("%s recover success" % lxc_name) return [True, "recover success"] def update_baseurl(self, lxc_name, old_ip, new_ip): rundir = self.lxcpath+'/'+lxc_name+'/rootfs' + self.rundir if not os.path.exists(rundir): return [False, "container %s doesn't exist"%(lxc_name)] jconfigpath = rundir + '/jupyter.config' config = open(jconfigpath, 'r') context = config.read() config.close() context = context.replace(old_ip+"/go", new_ip+"/go") config = open(jconfigpath, 'w') config.write(context) config.close() return [True,"success"] def stop_container(self, lxc_name): logger.info ("stop container:%s" % lxc_name) [success, status] = self.container_status(lxc_name) if not success: return [False, status] if status == "running": c = lxc.Container(lxc_name) if not c.stop(): logger.error("stop container %s failed" % lxc_name) return [False, "stop container failed"] else: self.historymgr.log(lxc_name,"Stop") logger.info("stop container %s success" % lxc_name) return [True, "stop container success"] 
else: logger.info("container %s already stopped" % lxc_name) return [True, "stop container success"] def detach_container(self, lxc_name): logger.info("detach container:%s" % lxc_name) [success, status] = self.container_status(lxc_name) if not success: return [False, status] if status == 'running': logger.error("container %s is running, please stop it first" % lxc_name) self.imgmgr.detachFS(lxc_name) return [True, "detach container success"] # check container: check LV and mountpoints, if wrong, try to repair it def check_container(self, lxc_name): logger.info ("check container:%s" % lxc_name) if not check_volume("docklet-group", lxc_name): logger.error("check container %s failed" % lxc_name) return [False, "check container failed"] #status = subprocess.call([self.libpath+"/lxc_control.sh", "check", lxc_name]) self.imgmgr.checkFS(lxc_name) logger.info ("check container %s success" % lxc_name) return [True, "check container success"] def is_container(self, lxc_name): if lxc.Container(lxc_name).defined: return True else: return False def container_status(self, lxc_name): if not self.is_container(lxc_name): return [False, "container not found"] c = lxc.Container(lxc_name) if c.running: return [True, 'running'] else: return [True, 'stopped'] def list_containers(self): lxclist = [] for c in lxc.list_containers(as_object=True): lxclist.append(c.name) return [True, lxclist] def delete_allcontainers(self): logger.info ("deleting all containers...") [status, containers] = self.list_containers() result = True for container in containers: [result, status] = self.container_status(container) if status=='running': self.stop_container(container) result = result & self.delete_container(container)[0] if result: logger.info ("deleted all containers success") return [True, 'all deleted'] else: logger.error ("deleted all containers failed") return [False, 'some containers delete failed'] # list containers in /var/lib/lxc/ as local # list containers in FS_PREFIX/global/... 
on this host as global def diff_containers(self): [status, localcontainers] = self.list_containers() containers = model.Container.query.all() globalcontainers = [] for con in containers: if con.host == self.addr: globalcontainers.append(con.containername) both = [] onlylocal = [] onlyglobal = [] for container in localcontainers: if container in globalcontainers: both.append(container) else: onlylocal.append(container) for container in globalcontainers: if container not in localcontainers: onlyglobal.append(container) return [both, onlylocal, onlyglobal] def create_image(self,username,imagename,containername,description="not thing",imagenum=10): return self.imgmgr.createImage(username,imagename,containername,description,imagenum) def update_basefs(self,imagename): return self.imgmgr.update_basefs(imagename) # check all local containers def check_allcontainers(self): [both, onlylocal, onlyglobal] = self.diff_containers() logger.info("check all containers and repair them") status = True result = True for container in both: logger.info ("%s in LOCAL and GLOBAL checks..." % container) [status, meg]=self.check_container(container) result = result & status if len(onlylocal) > 0: result = False logger.error ("some container only exists in LOCAL: %s" % onlylocal) if len(onlyglobal) > 0: result = False logger.error ("some container only exists in GLOBAL: %s" % onlyglobal) if status: logger.info ("check all containers success") return [True, 'all is ok'] else: logger.error ("check all containers failed") return [False, 'not ok'] ================================================ FILE: src/worker/monitor.py ================================================ #!/usr/bin/python3 ''' Monitor for Docklet Description:Monitor system for docklet will collect data on resources usages and status of vnode and phyiscal machines. And master can fetch these data and then show them on the web page. Besides, Monitor will also bill the vnodes according to their resources usage amount. 
Design:Monitor mainly consists of three parts: Collectors, Master_Collector and Fetchers. 1.Collectors will collect data every two seconds on each worker. And 'Container_Collector' will collect data of containers(vnodes), while 'Collector' will collect data of physical machines. 2.'Master_Collector' only runs on Master. It fetches the data on workers every two seconds by rpc and stores them in the memory of Master. 3.Fetchers are classes that Master will use them to fetch specific data in the memory and then show them on the web. 'Container_Fetcher' is the class to fetch the containers data in 'monitor_vnodes', while 'Fetcher' is the class to fetch the data of physical machines in 'monitor_hosts'. ''' import subprocess,re,os,psutil,math,sys import time,threading,json,traceback,platform from utils import env, etcdlib, gputools import lxc import xmlrpc.client from datetime import datetime from utils.model import db,VNode,History,BillingHistory,VCluster,PortMapping from utils.log import logger from httplib2 import Http from urllib.parse import urlencode # billing parameters a_cpu = 500 # seconds b_mem = 2000000 # MB c_disk = 4000 # MB d_port = 1 # major dict to store the monitoring data on Worker # only use on Worker # workerinfo: only store the data collected on current Worker, # has the first keys same as the second keys in monitor_hosts. workerinfo = {} # workercinfo: only store the data collected on current Worker, # use the names of vnodes(containers) as second key. # has the second keys same as the third keys in monitor_vnodes. workercinfo = {} # store the network statistics of users' gateways on current Worker. # key is username # bytes_sent and bytes_recv are the second keys gateways_stats = {} # only use on worker containerpids = [] pid2name = {} G_masterip = "" # only use on worker laststopcpuval = {} laststopruntime = {} lastbillingtime = {} # increment has keys: lastcputime,memincrement. 
# record the cpu val at last billing time and accumulate the memory usages during this billing hour. increment = {} # send http request to master def request_master(url,data): global G_masterip header = {'Content-Type':'application/x-www-form-urlencoded'} http = Http() [resp,content] = http.request("http://"+G_masterip+url,"POST",urlencode(data),headers = header) logger.info("response from master:"+content.decode('utf-8')) # The class is to collect data of containers on each worker class Container_Collector(threading.Thread): def __init__(self,test=False): global laststopcpuval global workercinfo threading.Thread.__init__(self) self.thread_stop = False self.interval = 2 self.billingtime = 3600 # billing interval self.test = test self.cpu_last = {} self.cpu_quota = {} self.mem_quota = {} self.net_stats = {} self.cores_num = int(subprocess.getoutput("grep processor /proc/cpuinfo | wc -l")) containers = self.list_container() for container in containers: # recovery if not container == '': try: vnode = VNode.query.get(container) laststopcpuval[container] = vnode.laststopcpuval laststopruntime[container] = vnode.laststopruntime workercinfo[container] = {} workercinfo[container]['basic_info'] = {} workercinfo[container]['basic_info']['billing'] = vnode.billing workercinfo[container]['basic_info']['billing_history'] = get_billing_history(container) workercinfo[container]['basic_info']['RunningTime'] = vnode.laststopruntime workercinfo[container]['basic_info']['a_cpu'] = a_cpu workercinfo[container]['basic_info']['b_mem'] = b_mem workercinfo[container]['basic_info']['c_disk'] = c_disk workercinfo[container]['basic_info']['d_port'] = d_port except: laststopcpuval[container] = 0 laststopruntime[container] = 0 return # list containers on this worker def list_container(self): output = subprocess.check_output(["sudo lxc-ls"],shell=True) output = output.decode('utf-8') containers = re.split('\s+',output) return containers # get running time of a process, return seconds def 
get_proc_etime(self,pid): fmt = subprocess.getoutput("ps -A -opid,etime | grep '^ *%d ' | awk '{print $NF}'" % pid).strip() if fmt == '': return -1 parts = fmt.split('-') days = int(parts[0]) if len(parts) == 2 else 0 fmt = parts[-1] parts = fmt.split(':') hours = int(parts[0]) if len(parts) == 3 else 0 parts = parts[len(parts)-2:] minutes = int(parts[0]) seconds = int(parts[1]) return ((days * 24 + hours) * 60 + minutes) * 60 + seconds # compute the billing val this running hour # if isreal is True, it will also make users' beans decrease to pay for the bill. # return the billing value in this running hour @classmethod def billing_increment(cls,vnode_name,isreal=True): global increment global workercinfo global G_masterip global a_cpu global b_mem global c_disk global d_port cpu_val = '0' if vnode_name not in workercinfo.keys(): return {'total': 0} if 'cpu_use' in workercinfo[vnode_name].keys(): cpu_val = workercinfo[vnode_name]['cpu_use']['val'] if vnode_name not in increment.keys(): increment[vnode_name] = {} increment[vnode_name]['lastcputime'] = cpu_val increment[vnode_name]['memincrement'] = 0 # compute cpu used time during this running hour cpu_increment = float(cpu_val) - float(increment[vnode_name]['lastcputime']) #logger.info("billing:"+str(cpu_increment)+" "+str(increment[container_name]['lastcputime'])) if cpu_increment == 0.0: avemem = 0 else: # avemem = (average memory used) * (cpu used time) avemem = cpu_increment*float(increment[vnode_name]['memincrement'])/1800.0 if 'disk_use' in workercinfo[vnode_name].keys(): disk_quota = workercinfo[vnode_name]['disk_use']['total'] else: disk_quota = 0 # get ports ports_count = count_port_mapping(vnode_name) # billing value = cpu used/a + memory used/b + disk quota/c + ports billing = {} billing['cpu'] = round(cpu_increment/a_cpu, 2) billing['cpu_time'] = round(cpu_increment, 2) billing['mem'] = round(avemem/b_mem, 2) billing['mem_use'] = round(avemem, 2) billing['disk'] = 
round(float(disk_quota)/1024.0/1024.0/c_disk, 2) billing['disk_use'] = round(float(disk_quota)/1024.0/1024.0, 2) billing['port'] = round(ports_count/d_port, 2) billing['port_use'] = ports_count billing['total'] = math.ceil(billing['cpu'] + billing['mem'] + billing['disk'] + billing['port']) billingval = billing['total'] if billingval > 100: # report outsize billing value logger.info("Huge Billingval for "+vnode_name+". cpu_increment:"+str(cpu_increment)+" avemem:"+str(avemem)+" disk:"+str(disk_quota)+"\n") if not isreal: # only compute return billing # initialize increment for next billing increment[vnode_name]['lastcputime'] = cpu_val increment[vnode_name]['memincrement'] = 0 if 'basic_info' not in workercinfo[vnode_name].keys(): workercinfo[vnode_name]['basic_info'] = {} workercinfo[vnode_name]['basic_info']['billing'] = 0 workercinfo[vnode_name]['basic_info']['RunningTime'] = 0 # update monitoring data nowbillingval = workercinfo[vnode_name]['basic_info']['billing'] nowbillingval += billingval workercinfo[vnode_name]['basic_info']['billing'] = nowbillingval workercinfo[vnode_name]['basic_info']['billing_history'] = get_billing_history(vnode_name) workercinfo[vnode_name]['basic_info']['billing_history']['cpu'] += billing['cpu'] workercinfo[vnode_name]['basic_info']['billing_history']['mem'] += billing['mem'] workercinfo[vnode_name]['basic_info']['billing_history']['disk'] += billing['disk'] workercinfo[vnode_name]['basic_info']['billing_history']['port'] += billing['port'] # update vnodes billing history save_billing_history(vnode_name, workercinfo[vnode_name]['basic_info']['billing_history']) # update vnodes' tables in database try: vnode = VNode.query.get(vnode_name) vnode.billing = nowbillingval except Exception as err: vnode = VNode(vnode_name) vnode.billing = nowbillingval db.session.add(vnode) logger.warning(err) try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) logger.error(err) raise # update 
users' tables in database owner_name = get_owner(vnode_name) auth_key = env.getenv('AUTH_KEY') data = {"owner_name":owner_name,"billing":billingval, "auth_key":auth_key} request_master("/billing/beans/",data) return billing # Collect net statistics of containers by psutil def collect_net_stats(self): raw_stats = psutil.net_io_counters(pernic=True) for key in raw_stats.keys(): if re.match('[\d]+-[\d]+',key) is not None: if key not in self.net_stats.keys(): self.net_stats[key] = {} self.net_stats[key]['bytes_sent'] = 0 self.net_stats[key]['bytes_recv'] = 0 self.net_stats[key]['bytes_recv_per_sec'] = round((int(raw_stats[key].bytes_sent) - self.net_stats[key]['bytes_recv']) / self.interval) self.net_stats[key]['bytes_sent_per_sec'] = round((int(raw_stats[key].bytes_recv) - self.net_stats[key]['bytes_sent']) / self.interval) self.net_stats[key]['bytes_recv'] = int(raw_stats[key].bytes_sent) self.net_stats[key]['bytes_sent'] = int(raw_stats[key].bytes_recv) self.net_stats[key]['packets_recv'] = int(raw_stats[key].packets_sent) self.net_stats[key]['packets_sent'] = int(raw_stats[key].packets_recv) self.net_stats[key]['errin'] = int(raw_stats[key].errout) self.net_stats[key]['errout'] = int(raw_stats[key].errin) self.net_stats[key]['dropin'] = int(raw_stats[key].dropout) self.net_stats[key]['dropout'] = int(raw_stats[key].dropin) else: if key not in gateways_stats.keys(): gateways_stats[key] = {} gateways_stats[key]['bytes_recv'] = int(raw_stats[key].bytes_sent) gateways_stats[key]['bytes_sent'] = int(raw_stats[key].bytes_recv) gateways_stats[key]['bytes_total'] = gateways_stats[key]['bytes_recv'] + gateways_stats[key]['bytes_sent'] #logger.info(self.net_stats) # the main function to collect monitoring data of a container def collect_containerinfo(self,container_name): global workerinfo global workercinfo global increment global lastbillingtime global containerpids global pid2name global laststopcpuval global laststopruntime is_batch = container_name.split('-')[1] == 
'batch' # collect basic information, such as running time,state,pid,ip,name container = lxc.Container(container_name) basic_info = {} basic_exist = 'basic_info' in workercinfo[container_name].keys() if basic_exist: basic_info = workercinfo[container_name]['basic_info'] else: basic_info['RunningTime'] = 0 basic_info['billing'] = 0 if 'billing_this_hour' not in basic_info.keys(): basic_info['billing_this_hour'] = {'total': 0} basic_info['Name'] = container_name basic_info['State'] = container.state #if basic_exist: # logger.info(workercinfo[container_name]['basic_info']) if(container.state == 'STOPPED'): workercinfo[container_name]['basic_info'] = basic_info #logger.info(basic_info) return False container_pid_str = str(container.init_pid) if not container_pid_str in containerpids: containerpids.append(container_pid_str) pid2name[container_pid_str] = container_name running_time = self.get_proc_etime(container.init_pid) if not is_batch: running_time += laststopruntime[container_name] basic_info['PID'] = container_pid_str basic_info['IP'] = container.get_ips()[0] basic_info['RunningTime'] = running_time workercinfo[container_name]['basic_info'] = basic_info # deal with cpu used value cpu_val = float("%.2f" % (float(container.get_cgroup_item("cpuacct.usage")) / 1000000000)) cpu_unit = "seconds" if not container_name in self.cpu_last.keys(): # read quota from config of container confpath = "/var/lib/lxc/%s/config"%(container_name) if os.path.exists(confpath): confile = open(confpath,'r') res = confile.read() lines = re.split('\n',res) for line in lines: words = re.split('=',line) key = words[0].strip() if key == "lxc.cgroup.memory.limit_in_bytes": # get memory quota, change unit to KB self.mem_quota[container_name] = float(words[1].strip().strip("M"))*1000000/1024 elif key == "lxc.cgroup.cpu.cfs_quota_us": # get cpu quota, change unit to cores tmp = int(words[1].strip()) if tmp < 0: self.cpu_quota[container_name] = self.cores_num else: self.cpu_quota[container_name] = 
tmp/100000.0 quota = {'cpu':self.cpu_quota[container_name],'memory':self.mem_quota[container_name]} #logger.info(quota) workercinfo[container_name]['quota'] = quota else: logger.error("Cant't find config file %s"%(confpath)) return False self.cpu_last[container_name] = 0 # compute cpu used percent cpu_use = {} lastval = 0 try: if not is_batch: lastval = laststopcpuval[container_name] except: logger.warning(traceback.format_exc()) cpu_val += lastval cpu_use['val'] = cpu_val cpu_use['unit'] = cpu_unit cpu_usedp = (float(cpu_val)-float(self.cpu_last[container_name]))/(self.cpu_quota[container_name]*self.interval*1.05) cpu_use['hostpercent'] = (float(cpu_val)-float(self.cpu_last[container_name]))/(self.cores_num*self.interval*1.05) if(cpu_usedp > 1 or cpu_usedp < 0): cpu_usedp = 1 cpu_use['usedp'] = cpu_usedp self.cpu_last[container_name] = cpu_val; workercinfo[container_name]['cpu_use'] = cpu_use if container_name not in increment.keys(): # initialize increment increment[container_name] = {} increment[container_name]['lastcputime'] = cpu_val increment[container_name]['memincrement'] = 0 # deal with memory used data memory = float(container.get_cgroup_item("memory.usage_in_bytes")) increment[container_name]['memincrement'] += memory / 1024 / 1024 mem_val = memory / 1024 mem_unit = 'KiB' if mem_val > 1024: mem_val /= 1024 mem_unit = 'MiB' if mem_val > 1024: mem_val /= 1024 mem_unit = 'GiB' mem_use = {} mem_use['val'] = float("%.2f" % mem_val) mem_use['unit'] = mem_unit mem_use['usedp'] = memory / 1024 / self.mem_quota[container_name] workercinfo[container_name]['mem_use'] = mem_use # compute billing value during this running hour up to now workercinfo[container_name]['basic_info']['billing_this_hour'] = self.billing_increment(container_name,False) # deal with network used data containerids = re.split("-",container_name) if not is_batch and len(containerids) >= 3 and (containerids[1] + "-" + containerids[2]) in self.net_stats.keys(): 
workercinfo[container_name]['net_stats'] = self.net_stats[containerids[1] + '-' + containerids[2]] #logger.info(workercinfo[container_name]['net_stats']) if not container_name in lastbillingtime.keys(): lastbillingtime[container_name] = int(running_time/self.billingtime) lasttime = lastbillingtime[container_name] #logger.info(lasttime) # process real billing if running time reach an hour if not is_batch and not int(running_time/self.billingtime) == lasttime: #logger.info("billing:"+str(float(cpu_val))) lastbillingtime[container_name] = int(running_time/self.billingtime) self.billing_increment(container_name) #print(output) #print(parts) return True # run function in the thread def run(self): global workercinfo global workerinfo cnt = 0 while not self.thread_stop: self.collect_net_stats() containers = self.list_container() countR = 0 conlist = [] for container in containers: # collect data of each container if not container == '': conlist.append(container) if not container in workercinfo.keys(): workercinfo[container] = {} try: success= self.collect_containerinfo(container) if(success): countR += 1 except Exception as err: logger.warning(traceback.format_exc()) logger.warning(err) containers_num = len(containers)-1 concnt = {} concnt['total'] = containers_num concnt['running'] = countR workerinfo['containers'] = concnt time.sleep(self.interval) if cnt == 0: # update containers list on the worker each 5 times workerinfo['containerslist'] = conlist cnt = (cnt+1)%5 if self.test: break return def stop(self): self.thread_stop = True # the class is to colect monitoring data of the worker class Collector(threading.Thread): def __init__(self,test=False): global workerinfo threading.Thread.__init__(self) self.thread_stop = False self.interval = 1 self.test=test self.gpu_info_count = 0 self.gpu_info_cache = None workerinfo['concpupercent'] = {} return # collect memory used information def collect_meminfo(self): meminfo = psutil.virtual_memory() memdict = {} memdict['total'] = 
meminfo.total/1024 memdict['used'] = meminfo.used/1024 memdict['free'] = meminfo.free/1024 memdict['buffers'] = meminfo.buffers/1024 memdict['cached'] = meminfo.cached/1024 memdict['percent'] = meminfo.percent #print(output) #print(memparts) return memdict # collect cpu used information and processors information def collect_cpuinfo(self): cpuinfo = psutil.cpu_times_percent(interval=1,percpu=False) cpuset = {} cpuset['user'] = cpuinfo.user cpuset['system'] = cpuinfo.system cpuset['idle'] = cpuinfo.idle cpuset['iowait'] = cpuinfo.iowait # get processors information from /proc/cpuinfo output = subprocess.check_output(["cat /proc/cpuinfo"],shell=True) output = output.decode('utf-8') parts = output.split('\n') info = [] idx = -1 for part in parts: if not part == '': key_val = re.split(':',part) key = key_val[0].rstrip() if key == 'processor': info.append({}) idx += 1 val = key_val[1].lstrip() if key=='processor' or key=='model name' or key=='core id' or key=='cpu MHz' or key=='cache size' or key=='physical id': info[idx][key] = val return [cpuset, info] # collect gpu used information def collect_gpuinfo(self): # load gpu price batch_gpu_billing = env.getenv("BATCH_GPU_BILLING") gpu_price = {} default_gpu_price = 100 # /cores*h if batch_gpu_billing: # examples: default:100,GeForce-GTX-1080-Ti:100,GeForce-GTX-2080-Ti:150,Tesla-V100-PCIE-16GB:200 billing_configs = batch_gpu_billing.split(',') for config in billing_configs: config_sp = config.split(':') if config_sp[0] == 'default': default_gpu_price = int(config_sp[1]) else: gpu_price[config_sp[0]] = int(config_sp[1]) # reload gpu info if self.gpu_info_count == 0 or self.gpu_info_cache is None: self.gpu_info_cache = gputools.get_gpu_status() gpu_names = gputools.get_gpu_names() for index in range(len(self.gpu_info_cache)): if index < len(gpu_names): self.gpu_info_cache[index]['name'] = gpu_names[index] self.gpu_info_cache[index]['price'] = gpu_price.get(gpu_names[index], default_gpu_price) self.gpu_info_count = 
(self.gpu_info_count + 1) % 5 return self.gpu_info_cache # collect disk used information def collect_diskinfo(self): global workercinfo parts = psutil.disk_partitions() setval = [] devices = {} for part in parts: # deal with each partition if not part.device in devices: devices[part.device] = 1 diskval = {} diskval['device'] = part.device diskval['mountpoint'] = part.mountpoint try: usage = psutil.disk_usage(part.mountpoint) diskval['total'] = usage.total diskval['used'] = usage.used diskval['free'] = usage.free diskval['percent'] = usage.percent if(part.mountpoint.startswith('/opt/docklet/local/volume')): # the mountpoint indicate that the data is the disk used information of a container names = re.split('/',part.mountpoint) container = names[len(names)-1] if not container in workercinfo.keys(): workercinfo[container] = {} workercinfo[container]['disk_use'] = diskval setval.append(diskval) # make a list except Exception as err: logger.warning(traceback.format_exc()) logger.warning(err) #print(output) #print(diskparts) return setval # collect operating system information def collect_osinfo(self): uname = platform.uname() osinfo = {} osinfo['platform'] = platform.platform() osinfo['system'] = uname.system osinfo['node'] = uname.node osinfo['release'] = uname.release osinfo['version'] = uname.version osinfo['machine'] = uname.machine osinfo['processor'] = uname.processor return osinfo # run function in the thread def run(self): global workerinfo workerinfo['osinfo'] = self.collect_osinfo() while not self.thread_stop: workerinfo['meminfo'] = self.collect_meminfo() [cpuinfo,cpuconfig] = self.collect_cpuinfo() workerinfo['cpuinfo'] = cpuinfo workerinfo['cpuconfig'] = cpuconfig workerinfo['gpuinfo'] = self.collect_gpuinfo() workerinfo['diskinfo'] = self.collect_diskinfo() workerinfo['running'] = True time.sleep(self.interval) if self.test: break # print(self.etcdser.getkey('/meminfo/total')) return def stop(self): self.thread_stop = True # the function used by rpc to 
fetch data from worker def workerFetchInfo(master_ip): global workerinfo global workercinfo global gateways_stats global G_masterip # tell the worker the ip address of the master G_masterip = master_ip return str([workerinfo, workercinfo, gateways_stats]) # get owner name of a container def get_owner(container_name): names = container_name.split('-') return names[0] # get cluster id of a container def get_cluster(container_name): names = container_name.split('-') return names[1] def count_port_mapping(vnode_name): pms = PortMapping.query.filter_by(node_name=vnode_name).all() return len(pms) def save_billing_history(vnode_name, billing_history): vnode_cluster_id = get_cluster(vnode_name) try: vcluster = VCluster.query.get(int(vnode_cluster_id)) billinghistory = BillingHistory.query.get(vnode_name) if billinghistory is not None: billinghistory.cpu = billing_history["cpu"] billinghistory.mem = billing_history["mem"] billinghistory.disk = billing_history["disk"] billinghistory.port = billing_history["port"] else: billinghistory = BillingHistory(vnode_name,billing_history["cpu"],billing_history["mem"],billing_history["disk"],billing_history["port"]) vcluster.billing_history.append(billinghistory) db.session.add(vcluster) db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) return def get_billing_history(vnode_name): billinghistory = BillingHistory.query.get(vnode_name) if billinghistory is not None: return dict(eval(str(billinghistory))) else: default = {} default['cpu'] = 0 default['mem'] = 0 default['disk'] = 0 default['port'] = 0 return default # To record data when the status of containers change class History_Manager: def __init__(self): try: VNode.query.all() History.query.all() except: db.create_all(bind='__all__') def getAll(self): return History.query.all() # log to the database, it will record runnint time, cpu time, billing val and action # action may be 'Create', 'Stop', 'Start', 'Recover', 'Delete' def 
log(self,vnode_name,action): global workercinfo global laststopcpuval res = VNode.query.filter_by(name=vnode_name).first() if res is None: vnode = VNode(vnode_name) vnode.histories = [] db.session.add(vnode) try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) vnode = VNode.query.get(vnode_name) billing = 0 cputime = 0 runtime = 0 owner = get_owner(vnode_name) try: billing = int(workercinfo[vnode_name]['basic_info']['billing']) except: billing = 0 try: cputime = float(workercinfo[vnode_name]['cpu_use']['val']) except: cputime = 0.0 try: runtime = float(workercinfo[vnode_name]['basic_info']['RunningTime']) except Exception as err: #print(traceback.format_exc()) runtime = 0 history = History(action,runtime,cputime,billing) vnode.histories.append(history) if action == 'Stop' or action == 'Create': laststopcpuval[vnode_name] = cputime vnode.laststopcpuval = cputime laststopruntime[vnode_name] = runtime vnode.laststopruntime = runtime db.session.add(history) try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) def getHistory(self,vnode_name): vnode = VNode.query.filter_by(name=vnode_name).first() if vnode is None: return [] else: res = History.query.filter_by(vnode=vnode_name).all() return list(eval(str(res))) # get all created containers(including those have been deleted) of a owner def getCreatedVNodes(self,owner): vnodes = VNode.query.filter(VNode.name.startswith(owner)).all() res = [] for vnode in vnodes: tmp = {"name":vnode.name,"billing":vnode.billing} res.append(tmp) return res ================================================ FILE: src/worker/ossmounter.py ================================================ import abc import subprocess, os from utils.log import logger class OssMounter(object): __metaclass__ = abc.ABCMeta @staticmethod def execute_cmd(cmd): ret = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) if 
ret.returncode != 0: msg = ret.stdout.decode(encoding="utf-8") logger.error(msg) return [False,msg] else: return [True,""] @staticmethod @abc.abstractmethod def mount_oss(datapath, mount_info): # mount oss pass @staticmethod @abc.abstractmethod def umount_oss(datapath, mount_info): # umount oss pass class AliyunOssMounter(OssMounter): @staticmethod def mount_oss(datapath, mount_info): # mount oss try: pwdfile = open("/etc/passwd-ossfs","w") pwdfile.write(mount_info.remotePath+":"+mount_info.accessKey+":"+mount_info.secretKey+"\n") pwdfile.close() except Exception as err: logger.error(traceback.format_exc()) return [False,msg] cmd = "chmod 640 /etc/passwd-ossfs" [success1, msg] = OssMounter.execute_cmd(cmd) if not success1: logger.error("Aliyun OSS mount chmod err:%s" % msg) return [False, msg] mountpath = datapath+"/Aliyun/"+mount_info.remotePath logger.info("Mount oss %s %s" % (mount_info.remotePath, mountpath)) if not os.path.isdir(mountpath): os.makedirs(mountpath) cmd = "ossfs %s %s -ourl=%s" % (mount_info.remotePath, mountpath, mount_info.other) [success, msg] = OssMounter.execute_cmd(cmd) if not success: logger.error("Aliyun OSS mount err:%s" % msg) return [False, msg] return [True,""] @staticmethod def umount_oss(datapath, mount_info): mountpath = datapath+"/Aliyun/"+mount_info.remotePath logger.info("UMount oss %s %s" % (mount_info.remotePath, mountpath)) cmd = "fusermount -u %s" % (mountpath) [success, msg] = OssMounter.execute_cmd(cmd) if not success: logger.error("Aliyun OSS umount err:%s"%msg) return [False,msg] [success, msg] = OssMounter.execute_cmd("rm -rf %s" % mountpath) if not success: logger.error("Aliyun OSS umount err:%s"%msg) return [False,msg] return [True,""] ================================================ FILE: src/worker/taskcontroller.py ================================================ #!/usr/bin/python3 import sys if sys.path[0].endswith("worker"): sys.path[0] = sys.path[0][:-6] from utils import env, tools config = env.getenv("CONFIG") 
#config = "/opt/docklet/local/docklet-running.conf" tools.loadenv(config) from utils.log import initlogging initlogging("docklet-taskcontroller") from utils.log import logger from concurrent import futures import grpc #from utils.log import logger #from utils import env import json,lxc,subprocess,threading,os,time,traceback from utils import imagemgr,etcdlib,gputools from utils.lvmtool import sys_run from worker import ossmounter from protos import rpc_pb2, rpc_pb2_grpc _ONE_DAY_IN_SECONDS = 60 * 60 * 24 MAX_RUNNING_TIME = _ONE_DAY_IN_SECONDS def ip_to_int(addr): [a, b, c, d] = addr.split('.') return (int(a)<<24) + (int(b)<<16) + (int(c)<<8) + int(d) def int_to_ip(num): return str((num>>24)&255)+"."+str((num>>16)&255)+"."+str((num>>8)&255)+"."+str(num&255) class TaskController(rpc_pb2_grpc.WorkerServicer): def __init__(self): rpc_pb2_grpc.WorkerServicer.__init__(self) etcdaddr = env.getenv("ETCD") logger.info ("using ETCD %s" % etcdaddr ) clustername = env.getenv("CLUSTER_NAME") logger.info ("using CLUSTER_NAME %s" % clustername ) # init etcdlib client try: self.etcdclient = etcdlib.Client(etcdaddr, prefix = clustername) except Exception: logger.error ("connect etcd failed, maybe etcd address not correct...") sys.exit(1) else: logger.info("etcd connected") # get master ip and report port [success,masterip] = self.etcdclient.getkey("service/master") if not success: logger.error("Fail to get master ip address.") sys.exit(1) else: self.master_ip = masterip logger.info("Get master ip address: %s" % (self.master_ip)) self.master_port = env.getenv('BATCH_MASTER_PORT') self.imgmgr = imagemgr.ImageMgr() self.fspath = env.getenv('FS_PREFIX') self.confpath = env.getenv('DOCKLET_CONF') self.taskmsgs = [] self.msgslock = threading.Lock() self.report_interval = 2 self.lock = threading.Lock() self.mount_lock = threading.Lock() self.cons_gateway = env.getenv('BATCH_GATEWAY') self.cons_ips = env.getenv('BATCH_NET') logger.info("Batch gateway ip address %s" % self.cons_gateway) 
logger.info("Batch ip pools %s" % self.cons_ips) self.cidr = 32 - int(self.cons_ips.split('/')[1]) self.ipbase = ip_to_int(self.cons_ips.split('/')[0]) self.free_ips = [] for i in range(2, (1 << self.cidr) - 1): self.free_ips.append(i) logger.info("Free ip addresses pool %s" % str(self.free_ips)) self.gpu_lock = threading.Lock() self.gpu_status = {} gpus = gputools.get_gpu_status() for gpu in gpus: self.gpu_status[gpu['id']] = "" self.start_report() logger.info('TaskController init success') # Need Locks def acquire_ip(self): self.lock.acquire() if len(self.free_ips) == 0: return [False, "No free ips"] ip = int_to_ip(self.ipbase + self.free_ips[0]) self.free_ips.remove(self.free_ips[0]) logger.info(str(self.free_ips)) self.lock.release() return [True, ip + "/" + str(32 - self.cidr)] # Need Locks def release_ip(self,ipstr): self.lock.acquire() ipnum = ip_to_int(ipstr.split('/')[0]) - self.ipbase self.free_ips.append(ipnum) logger.info(str(self.free_ips)) self.lock.release() def add_gpu_device(self, lxcname, gpu_need): if gpu_need < 1: return [True, ""] self.gpu_lock.acquire() use_gpus = [] for gpuid in self.gpu_status.keys(): if self.gpu_status[gpuid] == "" and gpu_need > 0: use_gpus.append(gpuid) gpu_need -= 1 if gpu_need > 0: self.gpu_lock.release() return [False, "No free GPUs"] for gpuid in use_gpus: self.gpu_status[gpuid] = lxcname try: gputools.add_device(lxcname, "/dev/nvidiactl") gputools.add_device(lxcname, "/dev/nvidia-uvm") for gpuid in use_gpus: gputools.add_device(lxcname,"/dev/nvidia"+str(gpuid)) logger.info("Add gpu:"+str(gpuid) +" to lxc:"+str(lxcname)) except Exception as e: logger.error(traceback.format_exc()) for gpuid in use_gpus: self.gpu_status[gpuid] = "" self.gpu_lock.release() return [False, "Error occurs when adding gpu device."] self.gpu_lock.release() return [True, ""] def release_gpu_device(self, lxcname): self.gpu_lock.acquire() for gpuid in self.gpu_status.keys(): if self.gpu_status[gpuid] == lxcname: self.gpu_status[gpuid] = "" 
self.gpu_lock.release() #mount_oss def mount_oss(self, datapath, mount_info): self.mount_lock.acquire() try: for mount in mount_info: provider = mount.provider mounter = getattr(ossmounter,provider+"OssMounter",None) if mounter is None: self.mount_lock.release() return [False, provider + " doesn't exist!"] [success, msg] = mounter.mount_oss(datapath,mount) if not success: self.mount_lock.release() return [False, msg] except Exception as err: self.mount_lock.release() logger.error(traceback.format_exc()) return [False,""] self.mount_lock.release() return [True,""] #umount oss def umount_oss(self, datapath, mount_info): try: for mount in mount_info: provider = mount.provider mounter = getattr(ossmounter,provider+"OssMounter",None) if mounter is None: return [False, provider + " doesn't exist!"] [success, msg] = mounter.umount_oss(datapath,mount) if not success: return [False, msg] except Exception as err: logger.error(traceback.format_exc()) return [False,""] #accquire ip and create a container def create_container(self,instanceid,username,image,lxcname,quota): # acquire ip [status, ip] = self.acquire_ip() if not status: return [False, ip] # prepare image and filesystem status = self.imgmgr.prepareFS(username,image,lxcname,str(quota.disk)) if not status: self.release_ip(ip) return [False, "Create container for batch failed when preparing filesystem"] rootfs = "/var/lib/lxc/%s/rootfs" % lxcname if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)): path = env.getenv('DOCKLET_LIB') subprocess.call([path+"/master/userinit.sh", username]) logger.info("user %s directory not found, create it" % username) sys_run("mkdir -p /var/lib/lxc/%s" % lxcname) logger.info("generate config file for %s" % lxcname) def config_prepare(content): content = content.replace("%ROOTFS%",rootfs) content = content.replace("%HOSTNAME%","batch-%s" % str(instanceid)) content = content.replace("%CONTAINER_MEMORY%",str(quota.memory)) content = 
content.replace("%CONTAINER_CPU%",str(quota.cpu*100000)) content = content.replace("%FS_PREFIX%",self.fspath) content = content.replace("%LXCSCRIPT%",env.getenv("LXC_SCRIPT")) content = content.replace("%USERNAME%",username) content = content.replace("%LXCNAME%",lxcname) content = content.replace("%IP%",ip) content = content.replace("%GATEWAY%",self.cons_gateway) return content logger.info(self.confpath) conffile = open(self.confpath+"/container.batch.conf", 'r') conftext = conffile.read() conffile.close() conftext = config_prepare(conftext) conffile = open("/var/lib/lxc/%s/config" % lxcname, 'w') conffile.write(conftext) conffile.close() return [True, ip] def process_task(self, request, context): logger.info('excute task with parameter: ' + str(request)) taskid = request.id instanceid = request.instanceid # get config from request command = request.parameters.command.commandLine #'/root/getenv.sh' #parameter['Parameters']['Command']['CommandLine'] #envs = {'MYENV1':'MYVAL1', 'MYENV2':'MYVAL2'} #parameters['Parameters']['Command']['EnvVars'] pkgpath = request.parameters.command.packagePath envs = request.parameters.command.envVars envs['taskid'] = str(taskid) envs['instanceid'] = str(instanceid) image = {} image['name'] = request.cluster.image.name if request.cluster.image.type == rpc_pb2.Image.PRIVATE: image['type'] = 'private' elif request.cluster.image.type == rpc_pb2.Image.PUBLIC: image['type'] = 'public' else: image['type'] = 'base' image['owner'] = request.cluster.image.owner username = request.username token = request.token lxcname = '%s-batch-%s-%s-%s' % (username,taskid,str(instanceid),token) instance_type = request.cluster.instance mount_list = request.cluster.mount outpath = [request.parameters.stdoutRedirectPath,request.parameters.stderrRedirectPath] timeout = request.timeout gpu_need = int(request.cluster.instance.gpu) reused = request.reused #create container [success, ip] = self.create_container(instanceid, username, image, lxcname, instance_type) if 
not success: return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED, message=ip) #mount oss self.mount_oss("%s/global/users/%s/oss" % (self.fspath,username), mount_list) conffile = open("/var/lib/lxc/%s/config" % lxcname, 'a+') mount_str = "lxc.mount.entry = %s/global/users/%s/oss/%s %s/root/oss/%s none bind,rw,create=dir 0 0" for mount in mount_list: conffile.write("\n"+ mount_str % (self.fspath, username, mount.remotePath, rootfs, mount.remotePath)) conffile.close() logger.info("Start container %s..." % lxcname) #container = lxc.Container(lxcname) ret = subprocess.run('lxc-start -n %s'%lxcname,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True) if ret.returncode != 0: logger.error('start container %s failed' % lxcname) self.release_ip(ip) self.imgmgr.deleteFS(lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,message="Can't start the container") logger.info('start container %s success' % lxcname) #add GPU [success, msg] = self.add_gpu_device(lxcname,gpu_need) if not success: logger.error("Fail to add gpu device. " + msg) container.stop() self.release_ip(ip) self.imgmgr.deleteFS(lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,message="Fail to add gpu device. 
" + msg) thread = threading.Thread(target = self.execute_task, args=(username,taskid,instanceid,envs,lxcname,pkgpath,command,timeout,outpath,ip,token,mount_list)) thread.setDaemon(True) thread.start() return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="") def write_output(self,lxcname,tmplogpath,filepath): cmd = "lxc-attach -n " + lxcname + " -- mv %s %s" if filepath == "" or filepath == "/root/nfs/batch_{jobid}/" or os.path.abspath("/root/nfs/"+tmplogpath) == os.path.abspath(filepath): return [True,""] ret = subprocess.run(cmd % ("/root/nfs/"+tmplogpath,filepath),stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True) if ret.returncode != 0: msg = ret.stdout.decode(encoding="utf-8") logger.error(msg) return [False,msg] logger.info("Succeed to moving nfs/%s to %s" % (tmplogpath,filepath)) return [True,""] def execute_task(self,username,taskid,instanceid,envs,lxcname,pkgpath,command,timeout,outpath,ip,token,mount_info): lxcfspath = "/var/lib/lxc/"+lxcname+"/rootfs/" scriptname = "batch_job.sh" try: scriptfile = open(lxcfspath+"root/"+scriptname,"w") scriptfile.write("#!/bin/bash\n") scriptfile.write("cd "+str(pkgpath)+"\n") scriptfile.write(command) scriptfile.close() except Exception as err: logger.error(traceback.format_exc()) logger.error("Fail to write script file with taskid(%s) instanceid(%s)" % (str(taskid),str(instanceid))) else: try: job_id = taskid.split('_')[1] except Exception as e: logger.error(traceback.format_exc()) job_id = "_none" jobdir = "batch_" + job_id logdir = "%s/global/users/%s/data/" % (self.fspath,username) + jobdir if not os.path.exists(logdir): logger.info("Directory:%s not exists, create it." 
% logdir) os.mkdir(logdir) stdoutname = str(taskid)+"_"+str(instanceid)+"_stdout.txt" stderrname = str(taskid)+"_"+str(instanceid)+"_stderr.txt" try: stdoutfile = open(logdir+"/"+stdoutname,"w") stderrfile = open(logdir+"/"+stderrname,"w") logger.info("Create stdout(%s) and stderr(%s) file to log" % (stdoutname, stderrname)) except Exception as e: logger.error(traceback.format_exc()) stdoutfile = None stderrfile = None cmd = "lxc-attach -n " + lxcname for envkey,envval in envs.items(): cmd = cmd + " -v %s=%s" % (envkey,envval) cmd = cmd + " -- /bin/bash \"" + "/root/" + scriptname + "\"" logger.info('run task with command - %s' % cmd) p = subprocess.Popen(cmd,stdout=stdoutfile,stderr=stderrfile, shell=True) #logger.info(p) if timeout == 0: to = MAX_RUNNING_TIME else: to = timeout while p.poll() is None and to > 0: time.sleep(min(2,to)) to -= 2 if p.poll() is None: p.kill() logger.info("Running time(%d) is out. Task(%s-%s-%s) will be killed." % (timeout,str(taskid),str(instanceid),token)) self.add_msg(taskid,username,instanceid,rpc_pb2.TIMEOUT,token,"Running time is out.") else: [success1,msg1] = self.write_output(lxcname,jobdir+"/"+stdoutname,outpath[0]) [success2,msg2] = self.write_output(lxcname,jobdir+"/"+stderrname,outpath[1]) if not success1 or not success2: if not success1: msg = msg1 else: msg = msg2 logger.info("Output error on Task(%s-%s-%s)." % (str(taskid),str(instanceid),token)) self.add_msg(taskid,username,instanceid,rpc_pb2.OUTPUTERROR,token,msg) else: if p.poll() == 0: logger.info("Task(%s-%s-%s) completed." % (str(taskid),str(instanceid),token)) self.add_msg(taskid,username,instanceid,rpc_pb2.COMPLETED,token,"") else: logger.info("Task(%s-%s-%s) failed." 
% (str(taskid),str(instanceid),token)) self.add_msg(taskid,username,instanceid,rpc_pb2.FAILED,token,"") container = lxc.Container(lxcname) if container.stop(): logger.info("stop container %s success" % lxcname) else: logger.error("stop container %s failed" % lxcname) logger.info("deleting container:%s" % lxcname) if self.imgmgr.deleteFS(lxcname): logger.info("delete container %s success" % lxcname) else: logger.error("delete container %s failed" % lxcname) logger.info("release ip address %s" % ip) self.release_ip(ip) self.release_gpu_device(lxcname) #umount oss self.umount_oss("%s/global/users/%s/oss" % (self.fspath,username), mount_info) def stop_tasks(self, request, context): for msg in request.taskmsgs: lxcname = '%s-batch-%s-%s-%s' % (msg.username,msg.taskid,str(msg.instanceid),msg.token) logger.info("Stop the task with lxc:"+lxcname) subprocess.run("lxc-stop -k -n %s" % lxcname, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="") def add_msg(self,taskid,username,instanceid,status,token,errmsg): self.msgslock.acquire() try: self.taskmsgs.append(rpc_pb2.TaskMsg(taskid=str(taskid),username=username,instanceid=int(instanceid),instanceStatus=status,token=token,errmsg=errmsg)) except Exception as err: logger.error(traceback.format_exc()) self.msgslock.release() #logger.info(str(self.taskmsgs)) def report_msg(self): channel = grpc.insecure_channel(self.master_ip+":"+self.master_port) stub = rpc_pb2_grpc.MasterStub(channel) while True: self.msgslock.acquire() reportmsg = rpc_pb2.ReportMsg(taskmsgs = self.taskmsgs) try: response = stub.report(reportmsg) logger.info("Response from master by reporting: "+str(response.status)+" "+response.message) except Exception as err: logger.error(traceback.format_exc()) self.taskmsgs = [] self.msgslock.release() time.sleep(self.report_interval) def start_report(self): thread = threading.Thread(target = self.report_msg, args=()) thread.setDaemon(True) 
thread.start() logger.info("Start to report task messages to master every %d seconds." % self.report_interval) def TaskControllerServe(): max_threads = int(env.getenv('BATCH_MAX_THREAD_WORKER')) worker_port = int(env.getenv('BATCH_WORKER_PORT')) logger.info("Max Threads on a worker is %d" % max_threads) server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_threads)) rpc_pb2_grpc.add_WorkerServicer_to_server(TaskController(), server) server.add_insecure_port('[::]:'+str(worker_port)) server.start() logger.info("Start TaskController Servicer on port:%d" % worker_port) try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: server.stop(0) if __name__ == "__main__": TaskControllerServe() ================================================ FILE: src/worker/taskworker.py ================================================ #!/usr/bin/python3 import sys if sys.path[0].endswith("worker"): sys.path[0] = sys.path[0][:-6] from utils import env, tools config = env.getenv("CONFIG") #config = "/opt/docklet/local/docklet-running.conf" tools.loadenv(config) from utils.log import initlogging initlogging("docklet-taskworker") from utils.log import logger from concurrent import futures import grpc #from utils.log import logger #from utils import env import json,lxc,subprocess,threading,os,time,traceback from utils import imagemgr,etcdlib,gputools from utils.lvmtool import sys_run from worker import ossmounter from protos import rpc_pb2, rpc_pb2_grpc from utils.nettools import netcontrol from master.network import getip _ONE_DAY_IN_SECONDS = 60 * 60 * 24 MAX_RUNNING_TIME = _ONE_DAY_IN_SECONDS class TaskWorker(rpc_pb2_grpc.WorkerServicer): def __init__(self): rpc_pb2_grpc.WorkerServicer.__init__(self) etcdaddr = env.getenv("ETCD") logger.info ("using ETCD %s" % etcdaddr ) clustername = env.getenv("CLUSTER_NAME") logger.info ("using CLUSTER_NAME %s" % clustername ) # init etcdlib client try: self.etcdclient = etcdlib.Client(etcdaddr, prefix = clustername) except 
Exception: logger.error ("connect etcd failed, maybe etcd address not correct...") sys.exit(1) else: logger.info("etcd connected") # get master ip and report port [success,masterip] = self.etcdclient.getkey("service/master") if not success: logger.error("Fail to get master ip address.") sys.exit(1) else: self.master_ip = masterip logger.info("Get master ip address: %s" % (self.master_ip)) self.master_port = env.getenv('BATCH_MASTER_PORT') # get worker ip self.worker_ip = getip(env.getenv('NETWORK_DEVICE')) logger.info("Worker ip is :%s"%self.worker_ip) self.imgmgr = imagemgr.ImageMgr() self.fspath = env.getenv('FS_PREFIX') self.confpath = env.getenv('DOCKLET_CONF') self.rm_all_batch_containers() self.taskmsgs = [] self.msgslock = threading.Lock() self.report_interval = 2 self.lock = threading.Lock() self.mount_lock = threading.Lock() self.gpu_lock = threading.Lock() self.gpu_status = {} gpus = gputools.get_gpu_status() for gpu in gpus: self.gpu_status[gpu['id']] = "" self.start_report() logger.info('TaskWorker init success') def stop_and_rm_containers(self,lxcname): logger.info("Stop the container with name:"+lxcname) subprocess.run("lxc-stop -k -n %s" % lxcname, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) lxcpath = "/var/lib/lxc/%s" % lxcname try: mount_info = [] for provider in os.listdir(lxcpath+"/oss"): for bkname in os.listdir(lxcpath+"/oss/"+provider): mount_info.append(rpc_pb2.Mount(provider=provider,remotePath=bkname)) self.umount_oss(lxcpath+"/oss", mount_info) except Exception as err: logger.info(err) pass return self.imgmgr.deleteFS(lxcname) def rm_all_batch_containers(self): for con in lxc.list_containers(): keys = con.split('-') if len(keys) < 2 or keys[1] != 'batch': continue if self.stop_and_rm_containers(con): logger.info("delete container %s success" % con) else: logger.error("delete container %s failed" % con) def add_gpu_device(self, lxcname, gpu_need): if gpu_need < 1: return [True, ""] self.gpu_lock.acquire() use_gpus = [] for 
gpuid in self.gpu_status.keys(): if self.gpu_status[gpuid] == "" and gpu_need > 0: use_gpus.append(gpuid) gpu_need -= 1 if gpu_need > 0: self.gpu_lock.release() return [False, "No free GPUs"] for gpuid in use_gpus: self.gpu_status[gpuid] = lxcname try: gputools.add_device(lxcname, "/dev/nvidiactl") gputools.add_device(lxcname, "/dev/nvidia-uvm") for gpuid in use_gpus: gputools.add_device(lxcname,"/dev/nvidia"+str(gpuid)) logger.info("Add gpu:"+str(gpuid) +" to lxc:"+str(lxcname)) except Exception as e: logger.error(traceback.format_exc()) for gpuid in use_gpus: self.gpu_status[gpuid] = "" self.gpu_lock.release() return [False, "Error occurs when adding gpu device."] self.gpu_lock.release() return [True, ""] def release_gpu_device(self, lxcname): self.gpu_lock.acquire() for gpuid in self.gpu_status.keys(): if self.gpu_status[gpuid] == lxcname: self.gpu_status[gpuid] = "" self.gpu_lock.release() #mount_oss def mount_oss(self, datapath, mount_info): self.mount_lock.acquire() try: for mount in mount_info: provider = mount.provider mounter = getattr(ossmounter,provider+"OssMounter",None) if mounter is None: self.mount_lock.release() return [False, provider + " doesn't exist!"] [success, msg] = mounter.mount_oss(datapath,mount) if not success: self.mount_lock.release() return [False, msg] except Exception as err: self.mount_lock.release() logger.error(traceback.format_exc()) return [False,""] self.mount_lock.release() return [True,""] #umount oss def umount_oss(self, datapath, mount_info): try: for mount in mount_info: provider = mount.provider mounter = getattr(ossmounter,provider+"OssMounter",None) if mounter is None: return [False, provider + " doesn't exist!"] [success, msg] = mounter.umount_oss(datapath,mount) if not success: return [False, msg] except Exception as err: logger.error(traceback.format_exc()) return [False,""] def start_vnode(self, request, context): logger.info('start vnode with config: ' + str(request)) taskid = request.taskid vnodeid = 
request.vnodeid envs = {} envs['taskid'] = str(taskid) envs['vnodeid'] = str(vnodeid) image = {} image['name'] = request.vnode.image.name if request.vnode.image.type == rpc_pb2.Image.PRIVATE: image['type'] = 'private' elif request.vnode.image.type == rpc_pb2.Image.PUBLIC: image['type'] = 'public' else: image['type'] = 'base' image['owner'] = request.vnode.image.owner username = request.username lxcname = '%s-batch-%s-%s' % (username,taskid,str(vnodeid)) instance_type = request.vnode.instance mount_list = request.vnode.mount gpu_need = int(request.vnode.instance.gpu) ipaddr = request.vnode.network.ipaddr gateway = request.vnode.network.gateway brname = request.vnode.network.brname masterip = request.vnode.network.masterip hostname = request.vnode.hostname #create container [success, msg] = self.create_container(taskid, vnodeid, username, image, lxcname, instance_type, ipaddr, gateway, brname, hostname) if not success: return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED, message=msg) #mount oss lxcpath = "/var/lib/lxc/%s" % lxcname rootfs = lxcpath + "/rootfs" self.mount_oss(lxcpath + "/oss", mount_list) conffile = open(lxcpath + "/config", 'a+') mount_str = "lxc.mount.entry = "+ lxcpath +"/oss/%s/%s %s/root/oss/%s none bind,rw,create=dir 0 0" for mount in mount_list: conffile.write("\n"+ mount_str % (mount.provider, mount.remotePath, rootfs, mount.remotePath)) conffile.close() logger.info("Start container %s..." 
% lxcname) container = lxc.Container(lxcname) ret = subprocess.run('lxc-start -n %s'%lxcname,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True) if ret.returncode != 0: logger.error('start container %s failed' % lxcname) self.umount_oss("/var/lib/lxc/%s/oss" % (lxcname), mount_list) self.imgmgr.deleteFS(lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,message="Can't start the container(%s)"%lxcname) logger.info('start container %s success' % lxcname) if masterip != self.worker_ip: netcontrol.setup_gre(brname, masterip) #add GPU [success, msg] = self.add_gpu_device(lxcname,gpu_need) if not success: logger.error("Fail to add gpu device. " + msg) container.stop() self.umount_oss("/var/lib/lxc/%s/oss" % (lxcname), mount_list) self.imgmgr.deleteFS(lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,message="Fail to add gpu device. " + msg) #start ssh service cmd = "lxc-attach -n %s -- service ssh start" % lxcname ret = subprocess.run(cmd,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True) if ret.returncode != 0: logger.error('Fail to start ssh service of container %s' % lxcname) container.stop() self.umount_oss("/var/lib/lxc/%s/oss" % (lxcname), mount_list) self.imgmgr.deleteFS(lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,message="Fail to start ssh service. 
lxc(%s)"%lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="") def start_task(self, request, context): logger.info('start task with config: ' + str(request)) taskid = request.taskid username = request.username vnodeid = request.vnodeid # get config from request command = request.parameters.command.commandLine #'/root/getenv.sh' #parameter['Parameters']['Command']['CommandLine'] #envs = {'MYENV1':'MYVAL1', 'MYENV2':'MYVAL2'} #parameters['Parameters']['Command']['EnvVars'] pkgpath = request.parameters.command.packagePath envs = request.parameters.command.envVars envs['taskid'] = str(taskid) envs['vnodeid'] = str(vnodeid) timeout = request.timeout token = request.token outpath = [request.parameters.stdoutRedirectPath,request.parameters.stderrRedirectPath] lxcname = '%s-batch-%s-%s' % (username,taskid,str(vnodeid)) thread = threading.Thread(target = self.execute_task, args=(username,taskid,vnodeid,envs,lxcname,pkgpath,command,timeout,outpath,token)) thread.setDaemon(True) thread.start() return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="") def stop_task(self, request, context): logger.info('stop task with config: ' + str(request)) taskid = request.taskid username = request.username vnodeid = request.vnodeid lxcname = '%s-batch-%s-%s' % (username,taskid,str(vnodeid)) logger.info("Stop the task with lxc:"+lxcname) subprocess.run("lxc-stop -k -n %s" % lxcname, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="") # stop and remove container def stop_vnode(self, request, context): logger.info('stop vnode with config: ' + str(request)) taskid = request.taskid username = request.username vnodeid = request.vnodeid brname = request.vnode.network.brname mount_list = request.vnode.mount lxcname = '%s-batch-%s-%s' % (username,taskid,str(vnodeid)) logger.info("Stop the task with lxc:"+lxcname) container = lxc.Container(lxcname) if container.stop(): logger.info("stop container %s 
success" % lxcname) else: logger.error("stop container %s failed" % lxcname) #umount oss self.umount_oss("/var/lib/lxc/%s/oss" % (lxcname), mount_list) logger.info("deleting container:%s" % lxcname) if self.imgmgr.deleteFS(lxcname): logger.info("delete container %s success" % lxcname) else: logger.error("delete container %s failed" % lxcname) #del ovs bridge if brname is not None: netcontrol.del_bridge(brname) #release gpu self.release_gpu_device(lxcname) return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="") def prepare_hook_conf(self, conf_path, env_dict): try: confile = open(conf_path, "w") for k,v in env_dict.items(): confile.write("%s=%s\n"%(k,v)) confile.close() except Exception as e: logger.error(traceback.format_exc()) return [False, e] return [True, ""] #accquire ip and create a container def create_container(self,taskid,vnodeid,username,image,lxcname,quota,ipaddr,gateway,brname,hostname): # prepare image and filesystem status = self.imgmgr.prepareFS(username,image,lxcname,str(quota.disk)) if not status: return [False, "Create container for batch failed when preparing filesystem"] rootfs = "/var/lib/lxc/%s/rootfs" % lxcname if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)): path = env.getenv('DOCKLET_LIB') subprocess.call([path+"/master/userinit.sh", username]) logger.info("user %s directory not found, create it" % username) sys_run("mkdir -p /var/lib/lxc/%s" % lxcname) logger.info("generate config file for %s" % lxcname) def config_prepare(content): content = content.replace("%ROOTFS%",rootfs) content = content.replace("%HOSTNAME%",hostname) content = content.replace("%TASKID%",taskid) content = content.replace("%CONTAINER_MEMORY%",str(quota.memory)) content = content.replace("%CONTAINER_CPU%",str(quota.cpu*100000)) content = content.replace("%FS_PREFIX%",self.fspath) content = content.replace("%LXCSCRIPT%",env.getenv("LXC_SCRIPT")) content = content.replace("%USERNAME%",username) content = content.replace("%LXCNAME%",lxcname) 
content = content.replace("%VETHPAIR%",str(taskid)+"-"+str(vnodeid)) content = content.replace("%IP%",ipaddr) content = content.replace("%GATEWAY%",gateway) return content logger.info(self.confpath) conffile = open(self.confpath+"/container.batch.conf", 'r') conftext = conffile.read() conffile.close() conftext = config_prepare(conftext) conffile = open("/var/lib/lxc/%s/config" % lxcname, 'w') conffile.write(conftext) conffile.close() hook_env = {} hook_env['Bridge'] = brname hook_env['HNAME'] = hostname return self.prepare_hook_conf(rootfs+"/../env.conf",hook_env) def write_output(self,lxcname,tmplogpath,filepath): cmd = "lxc-attach -n " + lxcname + " -- mv %s %s" if filepath == "" or filepath == "/root/nfs/batch_{jobid}/" or os.path.abspath("/root/nfs/"+tmplogpath) == os.path.abspath(filepath): return [True,""] ret = subprocess.run(cmd % ("/root/nfs/"+tmplogpath,filepath),stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True) if ret.returncode != 0: msg = ret.stdout.decode(encoding="utf-8") logger.error(msg) return [False,msg] logger.info("Succeed to moving nfs/%s to %s" % (tmplogpath,filepath)) return [True,""] def execute_task(self,username,taskid,vnodeid,envs,lxcname,pkgpath,command,timeout,outpath,token): lxcfspath = "/var/lib/lxc/"+lxcname+"/rootfs/" scriptname = "batch_job.sh" try: scriptfile = open(lxcfspath+"root/"+scriptname,"w") scriptfile.write("#!/bin/bash\n") scriptfile.write("cd "+str(pkgpath)+"\n") scriptfile.write(command) scriptfile.close() except Exception as err: logger.error(traceback.format_exc()) logger.error("Fail to write script file with taskid(%s) vnodeid(%s)" % (str(taskid),str(vnodeid))) else: try: job_id = taskid.split('_')[0] except Exception as e: logger.error(traceback.format_exc()) job_id = "_none" jobdir = "batch_" + job_id logdir = "%s/global/users/%s/data/" % (self.fspath,username) + jobdir try: os.mkdir(logdir) except Exception as e: logger.info("Error when creating logdir :%s "+str(e)) stdoutname = 
str(taskid)+"_"+str(vnodeid)+"_stdout.txt" stderrname = str(taskid)+"_"+str(vnodeid)+"_stderr.txt" try: stdoutfile = open(logdir+"/"+stdoutname,"w") stderrfile = open(logdir+"/"+stderrname,"w") logger.info("Create stdout(%s) and stderr(%s) file to log" % (stdoutname, stderrname)) except Exception as e: logger.error(traceback.format_exc()) stdoutfile = None stderrfile = None cmd = "lxc-attach -n " + lxcname for envkey,envval in envs.items(): cmd = cmd + " -v %s=%s" % (envkey,envval) cmd = cmd + " -- /bin/bash \"" + "/root/" + scriptname + "\"" logger.info('run task with command - %s' % cmd) #p = subprocess.Popen(cmd,stdout=stdoutfile,stderr=stderrfile, shell=True) #logger.info(p) if timeout == 0: timeout = MAX_RUNNING_TIME try: ret = subprocess.run(cmd, stdout=stdoutfile, stderr=stderrfile, shell=True, timeout = timeout) except subprocess.TimeoutExpired as e: logger.info("Running time(%d) is out. Task(%s-%s-%s) will be killed." % (timeout,str(taskid),str(vnodeid),token)) self.add_msg(taskid,username,vnodeid,rpc_pb2.TIMEOUT,token,"Running time(%ds) is out." % timeout) except Exception as e: logger.error(traceback.format_exc()) logger.info("Someting is wrong:%s. Task(%s-%s-%s) will be killed." % (str(e),str(taskid),str(vnodeid),token)) self.add_msg(taskid,username,vnodeid,rpc_pb2.FAILED,token,"Runtime Error. More information in stderr log.") else: [success1,msg1] = self.write_output(lxcname,jobdir+"/"+stdoutname,outpath[0]) [success2,msg2] = self.write_output(lxcname,jobdir+"/"+stderrname,outpath[1]) if not success1 or not success2: if not success1: msg = msg1 else: msg = msg2 logger.info("Output error on Task(%s-%s-%s)." % (str(taskid),str(vnodeid),token)) self.add_msg(taskid,username,vnodeid,rpc_pb2.OUTPUTERROR,token,msg) else: if ret.returncode == 0: logger.info("Task(%s-%s-%s) completed." % (str(taskid),str(vnodeid),token)) self.add_msg(taskid,username,vnodeid,rpc_pb2.COMPLETED,token,"") else: logger.info("Task(%s-%s-%s) failed." 
% (str(taskid),str(vnodeid),token)) self.add_msg(taskid,username,vnodeid,rpc_pb2.FAILED,token,"Runtime Error. More information in stderr log.") def add_msg(self,taskid,username,vnodeid,status,token,errmsg): self.msgslock.acquire() try: self.taskmsgs.append(rpc_pb2.TaskMsg(taskid=str(taskid),username=username,vnodeid=int(vnodeid),subTaskStatus=status,token=token,errmsg=errmsg)) except Exception as err: logger.error(traceback.format_exc()) self.msgslock.release() def report_msg(self): channel = grpc.insecure_channel(self.master_ip+":"+self.master_port) stub = rpc_pb2_grpc.MasterStub(channel) while True: self.msgslock.acquire() reportmsg = rpc_pb2.ReportMsg(taskmsgs = self.taskmsgs) try: response = stub.report(reportmsg) logger.info("Response from master by reporting: "+str(response.status)+" "+response.message) except Exception as err: logger.error(traceback.format_exc()) self.taskmsgs = [] self.msgslock.release() time.sleep(self.report_interval) def start_report(self): thread = threading.Thread(target = self.report_msg, args=()) thread.setDaemon(True) thread.start() logger.info("Start to report task messages to master every %d seconds." 
% self.report_interval) def TaskWorkerServe(): max_threads = int(env.getenv('BATCH_MAX_THREAD_WORKER')) worker_port = int(env.getenv('BATCH_WORKER_PORT')) logger.info("Max Threads on a worker is %d" % max_threads) server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_threads)) rpc_pb2_grpc.add_WorkerServicer_to_server(TaskWorker(), server) server.add_insecure_port('[::]:'+str(worker_port)) server.start() logger.info("Start TaskWorker Servicer on port:%d" % worker_port) try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: server.stop(0) if __name__ == "__main__": TaskWorkerServe() ================================================ FILE: src/worker/worker.py ================================================ #!/usr/bin/python3 # first init env import sys if sys.path[0].endswith("worker"): sys.path[0] = sys.path[0][:-6] from utils import env, tools config = env.getenv("CONFIG") #config = "/opt/docklet/local/docklet-running.conf" tools.loadenv(config) # must import logger after initlogging, ugly from utils.log import initlogging initlogging("docklet-worker") from utils.log import logger import xmlrpc.server, sys, time from socketserver import ThreadingMixIn import threading from utils import etcdlib, proxytool from worker import container, monitor from utils.nettools import netcontrol,ovscontrol,portcontrol from utils.lvmtool import new_group, recover_group from master import network ################################################################## # Worker # Description : Worker starts at worker node to listen rpc request and complete the work # Init() : # get master ip # initialize rpc server # register rpc functions # initialize network # initialize lvm group # Start() : # register in etcd # setup GRE tunnel # start rpc service ################################################################## # imitate etcdlib to genernate the key of etcdlib manually def generatekey(path): clustername = env.getenv("CLUSTER_NAME") return 
'/'+clustername+'/'+path class ThreadXMLRPCServer(ThreadingMixIn,xmlrpc.server.SimpleXMLRPCServer): pass class Worker(object): def __init__(self, etcdclient, addr, port): self.addr = addr self.port = port logger.info ("begin initialize on %s" % self.addr) self.fspath = env.getenv('FS_PREFIX') self.poolsize = env.getenv('DISKPOOL_SIZE') self.etcd = etcdclient self.master = self.etcd.getkey("service/master")[1] self.mode = None self.workertype = "normal" self.key="" if len(sys.argv) > 1 and sys.argv[1] == "batch-worker": self.workertype = "batch" if self.workertype == "normal": # waiting state is preserved for compatible. self.etcd.setkey("machines/runnodes/"+self.addr, "waiting") # get this node's key to judge how to init. [status, key] = self.etcd.getkey("machines/runnodes/"+self.addr) if status: self.key = generatekey("machines/allnodes/"+self.addr) else: logger.error("get key failed. %s" % 'machines/runnodes/'+self.addr) sys.exit(1) # check token to check global directory [status, token_1] = self.etcd.getkey("token") tokenfile = open(self.fspath+"/global/token", 'r') token_2 = tokenfile.readline().strip() if token_1 != token_2: logger.error("check token failed, global directory is not a shared filesystem") sys.exit(1) logger.info ("worker registered and checked the token") # worker search all run nodes to judge how to init # If the node in all node list, we will recover it. # Otherwise, this node is new added in. 
value = 'init-new' [status, alllist] = self.etcd.listdir("machines/allnodes") for node in alllist: if node['key'] == self.key: value = 'init-recovery' break logger.info("worker start in "+value+" mode, worker type is"+self.workertype) Containers = container.Container(self.addr, etcdclient) if value == 'init-new': logger.info ("init worker with mode:new") self.mode='new' # check global directory do not have containers on this worker [both, onlylocal, onlyglobal] = Containers.diff_containers() if len(both+onlyglobal) > 0: logger.error ("mode:new will clean containers recorded in global, please check") sys.exit(1) [status, info] = Containers.delete_allcontainers() if not status: logger.error ("delete all containers failed") sys.exit(1) # create new lvm VG at last new_group("docklet-group",self.poolsize,self.fspath+"/local/docklet-storage") #subprocess.call([self.libpath+"/lvmtool.sh", "new", "group", "docklet-group", self.poolsize, self.fspath+"/local/docklet-storage"]) elif value == 'init-recovery': logger.info ("init worker with mode:recovery") self.mode='recovery' # recover lvm VG first recover_group("docklet-group",self.fspath+"/local/docklet-storage") #subprocess.call([self.libpath+"/lvmtool.sh", "recover", "group", "docklet-group", self.fspath+"/local/docklet-storage"]) [status, meg] = Containers.check_allcontainers() if status: logger.info ("all containers check ok") else: logger.info ("not all containers check ok") #sys.exit(1) else: logger.error ("worker init mode:%s not supported" % value) sys.exit(1) # init portcontrol logger.info("init portcontrol") portcontrol.init_new() # initialize rpc # xmlrpc.server.SimpleXMLRPCServer(addr) -- addr : (ip-addr, port) # if ip-addr is "", it will listen ports of all IPs of this host logger.info ("initialize rpcserver %s:%d" % (self.addr, int(self.port))) # logRequests=False : not print rpc log #self.rpcserver = xmlrpc.server.SimpleXMLRPCServer((self.addr, self.port), logRequests=False) self.rpcserver = 
ThreadXMLRPCServer((self.addr, int(self.port)), allow_none=True, logRequests=False) self.rpcserver.register_introspection_functions() self.rpcserver.register_instance(Containers) self.rpcserver.register_function(monitor.workerFetchInfo) self.rpcserver.register_function(netcontrol.setup_gw) self.rpcserver.register_function(netcontrol.del_gw) self.rpcserver.register_function(netcontrol.del_bridge) self.rpcserver.register_function(ovscontrol.add_port_gre_withkey) self.rpcserver.register_function(netcontrol.check_gw) self.rpcserver.register_function(netcontrol.recover_usernet) self.rpcserver.register_function(proxytool.set_route) self.rpcserver.register_function(proxytool.delete_route) self.rpcserver.register_function(portcontrol.acquire_port_mapping) self.rpcserver.register_function(portcontrol.release_port_mapping) # register functions or instances to server for rpc #self.rpcserver.register_function(function_name) # init collector to collect monitor infomation self.con_collector = monitor.Container_Collector() self.hosts_collector = monitor.Collector() # delete the existing network #[success, bridges] = ovscontrol.list_bridges() #if success: # for bridge in bridges: # if bridge.startswith("docklet-br"): # ovscontrol.del_bridge(bridge) #else: # logger.error(bridges) #[success, message] = ovscontrol.destroy_all_qos() #if not success: # logger.error(message) '''if (self.addr == self.master): logger.info ("master also on this node. reuse master's network") else: logger.info ("initialize network") # 'docklet-br' of worker do not need IP Addr. 
#[status, result] = self.etcd.getkey("network/workbridge") #if not status: # logger.error ("get bridge IP failed, please check whether master set bridge IP for worker") #self.bridgeip = result # create bridges for worker #network.netsetup("init", self.bridgeip) if self.mode == 'new': if netcontrol.bridge_exists('docklet-br'): netcontrol.del_bridge('docklet-br') netcontrol.new_bridge('docklet-br') else: if not netcontrol.bridge_exists('docklet-br'): utils logger.error("docklet-br not found") sys.exit(1) logger.info ("setup GRE tunnel to master %s" % self.master) #network.netsetup("gre", self.master) #if not netcontrol.gre_exists('docklet-br', self.master): #netcontrol.setup_gre('docklet-br', self.master)''' # start service of worker def start(self): # start collector self.con_collector.start() self.hosts_collector.start() logger.info("Monitor Collector has been started.") # worker change it state itself. Independedntly from master. if self.workertype == "normal": self.etcd.setkey("machines/runnodes/"+self.addr, "work") publicIP = env.getenv("PUBLIC_IP") self.etcd.setkey("machines/publicIP/"+self.addr,publicIP) self.thread_sendheartbeat = threading.Thread(target=self.sendheartbeat) self.thread_sendheartbeat.start() # start serving for rpc logger.info ("begins to work") self.rpcserver.serve_forever() # send heardbeat package to keep alive in etcd, ttl=2s def sendheartbeat(self): if self.workertype == "normal": while(True): # check send heartbeat package every 1s time.sleep(2) [status, value] = self.etcd.getkey("machines/runnodes/"+self.addr) if status: # master has know the worker so we start send heartbeat package if value=='ok': self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 60) else: logger.error("get key %s failed, master may be crashed" % self.addr) self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 60) elif self.workertype == "batch": while(True): time.sleep(2) self.etcd.setkey("machines/batchnodes/"+self.addr, "ok", ttl = 60) if __name__ 
== '__main__': etcdaddr = env.getenv("ETCD") logger.info ("using ETCD %s" % etcdaddr ) clustername = env.getenv("CLUSTER_NAME") logger.info ("using CLUSTER_NAME %s" % clustername ) # get network interface net_dev = env.getenv("NETWORK_DEVICE") logger.info ("using NETWORK_DEVICE %s" % net_dev ) ipaddr = network.getip(net_dev) if ipaddr is False: logger.error("network device is not correct") sys.exit(1) else: logger.info("using ipaddr %s" % ipaddr) # init etcdlib client try: etcdclient = etcdlib.Client(etcdaddr, prefix = clustername) except Exception: logger.error ("connect etcd failed, maybe etcd address not correct...") sys.exit(1) else: logger.info("etcd connected") cpu_quota = env.getenv('CONTAINER_CPU') logger.info ("using CONTAINER_CPU %s" % cpu_quota ) mem_quota = env.getenv('CONTAINER_MEMORY') logger.info ("using CONTAINER_MEMORY %s" % mem_quota ) worker_port = env.getenv('WORKER_PORT') logger.info ("using WORKER_PORT %s" % worker_port ) logger.info("Starting worker") worker = Worker(etcdclient, addr=ipaddr, port=worker_port) worker.start() ================================================ FILE: tools/DOCKLET_NOTES.txt ================================================ ** MUST READ ** 1. Please keep your important data in ~/nfs directory. It will not be destroyed even if the workspace is deleted. 2. If you delete your workspace, all data in your Home directory will be lost, except those in ~/nfs directory. 3. You can save your workspace as a private image if you have modified the system and do not want to repeat it in your new workspace or new container. 4. Your containers are distributed by default. So it is ideal for simple parallel jobs. 5. If you find the Web Terminal not align correctly, choose a monospace font may help. 
================================================ FILE: tools/R_demo.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# 一个R语言实现的爬虫,爬取拉手网美食信息" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "点击下边的cell,点击上方工具栏里的执行图标,即可执行代码块,看到输出结果。代码块左边的In[]出现In[*]表示代码正在执行" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "library(XML)\n", "\n", "giveNames = function(rootNode){\n", " names <- xpathSApply(rootNode,\"//h3/a[@class='goods-name']\",xmlValue)\n", " names\n", "}\n", "\n", "givesevices = function(rootNode){\n", " sevices <- xpathSApply(rootNode,\"//h3/a[@class='goods-text']\",xmlValue)\n", " sevices\n", "}\n", "\n", "\n", "giveprices = function(rootNode){\n", " prices <- xpathSApply(rootNode,\"//div/span[@class='price']\",xmlValue)\n", " prices\n", "}\n", "\n", "\n", "givemoney = function(rootNode){\n", " money <- xpathSApply(rootNode,\"//div/span[@class='money']\",xmlValue)\n", " money\n", "}\n", "\n", "\n", "giveplaces = function(rootNode){\n", " places <- xpathSApply(rootNode,\"//a/span[@class='goods-place']\",xmlValue)\n", " places\n", "}\n", "\n", "\n", "getmeituan = function(URL){\n", " Sys.sleep(runif(1,1,2))\n", " doc<-htmlParse(URL[1],encoding=\"UTF-8\")\n", " rootNode<-xmlRoot(doc)\n", " data.frame(\n", " Names=giveNames(rootNode), #店名\n", " services=givesevices(rootNode), #服务\n", " prices=giveprices(rootNode), #现价\n", " money=givemoney(rootNode), #原价\n", " places=giveplaces(rootNode) #地点\n", " \n", " )\n", "}\n", "\n", "\n", "URL = paste0(\"http://shenzhen.lashou.com/cate/meishi/page\",1:10)\n", "\n", "mainfunction = function(URL){\n", " data = rbind(\n", " getmeituan (URL[1]),\n", " getmeituan (URL[2]),\n", " getmeituan (URL[3]),\n", " getmeituan (URL[4]),\n", " getmeituan (URL[5])\n", " )\n", " \n", " \n", "}\n", "ll=mainfunction(URL)\n", "write.table(ll,\"result.txt\",row.names=FALSE)\n", "ll\n" ] }, { 
"cell_type": "markdown", "metadata": {}, "source": [ "# R语言的线性回归实例" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "输入数据" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "#读入数据\n", "x<- c(0.10,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.20,0.21,0.23)\n", "y<-c(42.0,43.5,45.0,45.5,45.0,47.5,49,53,50,55,55,60)\n", "#绘出 x 与 y 的散列图\n", "plot(y~x)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "执行该段代码,即可看到输出图形;从图中我们可以看出 y 和 x 存在线性相关性,可以进行线性回归分析:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "model<-lm(y~x)\n", "summary(model)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "我们通过 P 值(就是上面的 pr 那一列)来查看对应的解释变量 x 的显著性,通过将 p 值与 0.05 进行比较,若改值小于 0.05,就可以说该变量与被解释变量存在显著的相关性。\n", "\n", "Multiple R-squared 和 Adjusted R-squared 这两个值,就是我们常称为”拟合优度“和”修正的拟合优度“,是指回归方程对样本的拟合程度,这里我们可以看到,修正的拟合优度为 0.9429,表示拟合程度超过五成,这个值越高越好。\n", "\n", "最后,看下 F-statistic,也就是常说的 F 统计量,也称为 F 检验,常用语判断方程整体的显著性实验,其 p 值为 9.505e-08,显然小于 0.05,我们可以认为方程在 P=0.05 的水平上是通过显著性检验的。\n", "\n", "从上面我们看出我们的线性回归效果不错,那么我们可以利用拟合方程进行分类,或者预测。" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "newX<-data.frame(x=0.16)\n", "predict(model,newdata=newX,interval=\"prediction\",level=0.95)#interval=”prediction“ level指定预测的置信区间" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# R语言的逻辑回归示例" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "counts <- c(18,17,15,20,10,20,25,13,12)\n", "outcome <- gl(3,1,9)\n", "treatment <- gl(3,3)\n", "print(d.AD <- data.frame(treatment, outcome, counts))\n", "glm.D93 <- glm(counts ~ outcome + treatment, family = poisson())\n", "anova(glm.D93)\n", "summary(glm.D93)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], 
"metadata": { "kernelspec": { "display_name": "R", "language": "R", "name": "ir" }, "language_info": { "codemirror_mode": "r", "file_extension": ".r", "mimetype": "text/x-r-source", "name": "R", "pygments_lexer": "r", "version": "3.2.3" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: tools/alterUserTable.py ================================================ import sys sys.path.append("../src/") from model import db,User print("Query all users:") users = User.query.all() print(users) newusers = [] print("Copy data to new users and set their beans to 10000...") for user in users: newuser = User(user.username,user.password,user.avatar,user.nickname,user.description,user.status, user.e_mail,user.student_number,user.department,user.truename,user.tel,user.register_date, user.user_group,user.auth_method) newuser.beans = 10000 newusers.append(newuser) print("Drop all table...") db.drop_all(bind='__all__') print("Create all tables with beans...") setattr(User,'beans',db.Column(db.Integer)) db.create_all(bind='__all__') for newuser in newusers: db.session.add(newuser) db.session.commit() print("Update users table successfully!") ================================================ FILE: tools/clean-usage.py ================================================ #!/usr/bin/python3 import os, json, sys sys.path.append("../src/") from model import db, User, UserUsage def clean_usage(username,alluser=False): if alluser: usages = UserUsage.query.all() for usage in usages: usage.cpu = str(0) usage.memory = str(0) usage.disk = str(0) db.session.commit() else: usage = UserUsage.query.filter_by(username = username).first() usage.cpu = str(0) usage.memory = str(0) usage.disk = str(0) db.session.commit() return if __name__ == '__main__': if len(sys.argv) >= 2: username = sys.argv[1] clean_usage(username) else: clean_usage("user",True) ================================================ FILE: tools/cloudsetting.aliyun.template.json 
================================================ { "CloudName": "aliyun", "AccessKeyId": "your-key", "AccessKeySecret": "your-secret", "RegionId": "cn-beijing", "ZoneId": "cn-beijing-a", "InstanceType": "ecs.sn1ne.xlarge", "SystemDisk.Size": 500, "Password": "Unias1234", "VSwitchId": "the vswitchid of your vpc", "VolumeName": "the volume name (with host name) of your glusterfs" } ================================================ FILE: tools/dl_start_spark.sh ================================================ #!/bin/sh # a naive script to fast start spark cluster, assuming host-0 master, # others slaves. # used with dl_stop_spark.sh SPARK_HOME=/home/spark HOSTS=`grep -v localhost /etc/hosts | awk '{print $2}'` echo "Starting master in host-0" $SPARK_HOME/sbin/start-master.sh for h in $HOSTS ; do echo "Starting slave in $h" if [ $h != 'host-0' ] ; then ssh root@$h /home/spark/sbin/start-slave.sh spark://host-0:7077 else /home/spark/sbin/start-slave.sh spark://host-0:7077 fi done ================================================ FILE: tools/dl_stop_spark.sh ================================================ #!/bin/sh # a naive script to stop spark cluster, assuming host-0 master # others slaves # used with dl_start_spark.sh SPARK_HOME=/home/spark HOSTS=`grep -v localhost /etc/hosts | awk '{print $2}'` for h in $HOSTS ; do echo "Stopping slave in $h" if [ $h != 'host-0' ] ; then ssh root@$h /home/spark/sbin/stop-slave.sh else /home/spark/sbin/stop-slave.sh fi done echo "Stopping master in host-0" $SPARK_HOME/sbin/stop-master.sh ================================================ FILE: tools/docklet-deploy.sh ================================================ apt-get update apt-get install -y git git clone http://github.com/unias/docklet.git /home/docklet /home/docklet/prepare.sh cp /home/docklet/conf/docklet.conf.template /home/docklet/conf/docklet.conf cp /home/docklet/web/templates/home.template /home/docklet/web/templates/home.html NETWORK_DEVICE=`route | grep default | awk 
{'print $8'};` echo "DISKPOOL_SIZE=200000 ETCD=%MASTERIP%:2379 NETWORK_DEVICE=$NETWORK_DEVICE PROXY_PORT=8000 NGINX_PORT=80" >> /home/docklet/conf/docklet.conf #please modify the mount command for your corresponding distributed file system if you are not using glusterfs mount -t glusterfs %VOLUMENAME% /opt/docklet/global/ if [ -f /opt/docklet/global/packagefs.tgz ]; then tar zxvf /opt/docklet/global/packagefs.tgz -C /opt/docklet/local/ > /dev/null fi /home/docklet/bin/docklet-worker start exit 0 ================================================ FILE: tools/etcd-multi-nodes.sh ================================================ #!/bin/bash # more details for https://coreos.com/etcd/docs/latest which etcd &>/dev/null || { echo "etcd not installed, please install etcd first" && exit 1; } if [ $# -eq 0 ] ; then echo "Usage: `basename $0` index selfip ip1 ip2 ip3" echo " ip1 ip2 ip3 are the ip address of node etcd_1 etcd_2 etcd_3" exit 1 fi etcdindex=etcd_$1 shift selfip=$1 shift etcd_1=$1 index=1 while [ $# -gt 0 ] ; do h="etcd_$index" if [ $index -eq 1 ] ; then CLUSTER="$h=http://$1:2380" else CLUSTER="$CLUSTER,$h=http://$1:2380" fi index=$(($index+1)) shift done # -initial-advertise-peer-urls : tell others what peer urls of me # -listen-peer-urls : what peer urls of me # -listen-client-urls : what client urls to listen # -advertise-client-urls : tell others what client urls to listen of me # -initial-cluster-state : new means join a new cluster; existing means join an existing cluster # : new not means clear depdir=${0%/*} tempdir=/opt/docklet/local [ ! -d $tempdir/log ] && mkdir -p $tempdir/log [ ! 
-d $tempdir/run ] && mkdir -p $tempdir/run etcd --name $etcdindex \ --initial-advertise-peer-urls http://$selfip:2380 \ --listen-peer-urls http://$selfip:2380 \ --listen-client-urls http://$selfip:2379,http://127.0.0.1:2379 \ --advertise-client-urls http://$selfip:2379 \ --initial-cluster-token etcd-cluster \ --initial-cluster $CLUSTER \ --initial-cluster-state new > $tempdir/log/etcd.log 2>&1 & etcdpid=$! echo "etcd start with pid: $etcdpid and log:$tempdir/log/etcd.log" echo $etcdpid > $tempdir/run/etcd.pid ================================================ FILE: tools/etcd-one-node.sh ================================================ #!/bin/sh # more details for https://coreos.com/etcd/docs/latest #which etcd &>/dev/null || { echo "etcd not installed, please install etcd first" && exit 1; } which etcd >/dev/null || { echo "etcd not installed, please install etcd first" && exit 1; } etcd_1=localhost if [ $# -gt 0 ] ; then etcd_1=$1 fi # -initial-advertise-peer-urls : tell others what peer urls of me # -listen-peer-urls : what peer urls of me # -listen-client-urls : what client urls to listen # -advertise-client-urls : tell others what client urls to listen of me # -initial-cluster-state : new means join a new cluster; existing means a new node join an existing cluster # : new not means clear, old data is still alive depdir=${0%/*} tempdir=/opt/docklet/local [ ! -d $tempdir/log ] && mkdir -p $tempdir/log [ ! -d $tempdir/run ] && mkdir -p $tempdir/run echo "starting etcd on $etcd_1" #stdbuf -o0 -e0 $tempdir/etcd --name etcd_1 \ etcd --name etcd_1 \ --data-dir $tempdir/etcd_data \ --initial-advertise-peer-urls http://$etcd_1:2380 \ --listen-peer-urls http://$etcd_1:2380 \ --listen-client-urls http://$etcd_1:2379 \ --advertise-client-urls http://$etcd_1:2379 \ --initial-cluster-token etcd_cluster \ --initial-cluster etcd_1=http://$etcd_1:2380 \ --initial-cluster-state new > $tempdir/log/etcd.log 2>&1 & etcdpid=$! 
echo "etcd start with pid: $etcdpid and log:$tempdir/log/etcd.log" echo $etcdpid > $tempdir/run/etcd.pid ================================================ FILE: tools/nginx_config.sh ================================================ #!/bin/sh MASTER_IP=0.0.0.0 NGINX_PORT=8080 PROXY_PORT=8000 WEB_PORT=8888 NGINX_CONF=/etc/nginx toolsdir=${0%/*} DOCKLET_TOOLS=$(cd $toolsdir; pwd) DOCKLET_HOME=${DOCKLET_TOOLS%/*} DOCKLET_CONF=$DOCKLET_HOME/conf . $DOCKLET_CONF/docklet.conf NGINX_CONF=${NGINX_CONF}/sites-enabled echo "copy nginx_docklet.conf to nginx config path..." cp $DOCKLET_CONF/nginx_docklet.conf ${NGINX_CONF}/ sed -i "s/%MASTER_IP/${MASTER_IP}/g" ${NGINX_CONF}/nginx_docklet.conf sed -i "s/%NGINX_PORT/${NGINX_PORT}/g" ${NGINX_CONF}/nginx_docklet.conf sed -i "s/%PROXY_PORT/${PROXY_PORT}/g" ${NGINX_CONF}/nginx_docklet.conf sed -i "s/%WEB_PORT/${WEB_PORT}/g" ${NGINX_CONF}/nginx_docklet.conf if [ "${NGINX_PORT}" != "80" ] && [ "${NGINX_PORT}" != "443" ] then sed -i "s/\$host/\$host:\$server_port/g" ${NGINX_CONF}/nginx_docklet.conf fi echo "restart nginx..." /etc/init.d/nginx restart ================================================ FILE: tools/npmrc ================================================ registry = https://registry.npm.taobao.org ================================================ FILE: tools/pip.conf ================================================ [global] index-url=https://pypi.mirrors.ustc.edu.cn/simple/ ================================================ FILE: tools/python_demo.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# 用Python分析《美女与野兽》" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "在一篇最近发表的论文*A quantitative analysis of gendered compliments in Disney Princess films*中,Carmen Fought和Karen Eisenhauer发现在这部迪士尼经典影片中女性角色的对话要多于迪士尼近期的电影作品。作者在网络上发现了美女与《野兽》的脚本,因此我立刻用Python重做了他们的分析。\n", "
更多地,我在文章最后加入了对《玩具总动员》的分析,这个脚本的形式完全不同,但其中91%的对白来自男性角色。" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "点击下边的cell,点击上方工具栏里的执行图标,即可执行代码块,看到输出结果。代码块左边的In[]出现In[*]表示代码正在执行" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "from __future__ import division\n", "\n", "import re\n", "from collections import defaultdict\n", "\n", "import requests\n", "import pandas as pd\n", "import matplotlib\n", "\n", "%matplotlib inline\n", "matplotlib.style.use('ggplot')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Load the script which comes as a text file\n", "\n", "script_url = 'http://www.fpx.de/fp/Disney/Scripts/BeautyAndTheBeast.txt'\n", "script = requests.get(script_url).text" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "我们看下脚本的开篇:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Let's look at the beginning of the script\n", "\n", "script.splitlines()[:20]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "再在中间随意选取一段:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Let's look at a random place\n", "\n", "script.splitlines()[500:520]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "看上去很容易分析,因为角色和对白间用:隔开" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# seems fairly easy to parse since \n", "# each new speaking line has : and begins with all caps\n", "\n", "def remove_spaces(line):\n", " # remove the weird spaces\n", " return re.sub(' +',' ',line)\n", "\n", "def remove_paren(line):\n", " # remove directions that are not spoken\n", " return re.sub(r'\\([^)]*\\)', '', line)\n", "\n", "\n", "lines = []\n", "line = ''\n", "for row in script.splitlines():\n", " if ': ' in row and 
row[:3].upper() == row[:3]:\n", " line = remove_spaces(line)\n", " line = remove_paren(line)\n", " lines.append(line)\n", " line = row\n", " elif ' ' in row:\n", " line = line + ' ' + row.lstrip()\n", "# don't forget the last line\n", "lines.append(remove_spaces(line))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "lines[:15]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "看看结尾什么样:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# How does the end look\n", "\n", "lines[-5:]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# 我们去掉可能的空白行\n", "\n", "print (len(lines))\n", "lines = [l for l in lines if len(l) > 0]\n", "print (len(lines))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "现在,我们找出所有角色,并计算他们的出场次数(对白数)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# now figure out the roles and how many times they appear\n", "\n", "roles = defaultdict(int)\n", "\n", "for line in lines:\n", " # take advantage of the fact that the speaker is always listed before the :\n", " speaker = line.split(':')[0]\n", " roles[speaker] = roles[speaker] + 1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "len(roles)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "看一下每个角色出现的相对频率:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# take a look at the relative frequency of each role\n", "roles" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "看起来有一行“to think about”是乱入的(恰好满足了parse条件),我们忽略它" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Looks like there is one bum 
line ('to think about'')\n", "# But I'll ignore that for now.\n", "\n", "# Quickly eye ball which roles are female and which are possibly mixed groups.\n", "\n", "females = ['WOMAN 1',\n", " 'WOMAN 2',\n", " 'WOMAN 3',\n", " 'WOMAN 4',\n", " 'WOMAN 5',\n", " 'OLD CRONIES',\n", " 'MRS. POTTS',\n", " 'BELLE',\n", " 'BIMBETTE 1'\n", " 'BIMBETTE 2',\n", " 'BIMBETTE 3']\n", "\n", "groups = ['MOB',\n", " 'ALL',\n", " 'BOTH']" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "将每一行对白根据角色性别进行标记,并统计不同性别的对白数量" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Mark each line of dialogue by sex and count them\n", "\n", "sex_lines = {'Male': 0,\n", " 'Female': 0}\n", "\n", "for line in lines:\n", " # Extract speaker \n", " speaker = line.split(':')[0]\n", " \n", " if speaker in females:\n", " sex_lines['Female'] += 1\n", " \n", " elif sex_lines not in groups:\n", " sex_lines['Male'] += 1\n", "\n", "print (sex_lines)\n", "print (sex_lines['Male']/(sex_lines['Male'] + sex_lines['Female']))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "我们使用一张图来显示结果:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Quick graphical representation \n", "\n", "df = pd.DataFrame([sex_lines.values()],columns=sex_lines.keys())\n", "df.plot(kind='bar')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "也许男性角色和女性角色的对白长度有明显不同?我们来看一看
这次我们计算对白中单词数量而不是计算对白次数:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Maybe men and women talk for different lengths? This counts words instead of \n", "\n", "sex_words = {'Male': 0,\n", " 'Female': 0}\n", "\n", "for line in lines:\n", " speaker = line.split(':')[0]\n", " dialogue = line.split(':')[1] \n", " # remove the \n", " # tokenize sentence by spaces\n", " word_count = len(dialogue.split(' ')) \n", " \n", " if speaker in females:\n", " sex_words['Female'] += word_count\n", " elif speaker not in groups:\n", " sex_words['Male'] += word_count\n", "\n", "print (sex_words)\n", "print (sex_words['Male']/(sex_words['Male'] + sex_words['Female']))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "也用图表显示出来:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Quick graphical representation \n", "\n", "df = pd.DataFrame([sex_words.values()],columns=sex_words.keys())\n", "df.plot(kind='bar')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "下面是额外的《玩具总动员》的分析" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Bonus toy story analysis\n", "\n", "url = 'http://www.dailyscript.com/scripts/toy_story.html'\n", "toy_story_script = requests.get(url).text\n", "\n", "# toy_story_script.splitlines()[250:350]\n", "\n", "lines = []\n", "speaker = ''\n", "dialogue = ''\n", "for row in toy_story_script.splitlines()[90:]:\n", " if ' ' in row: \n", " if ':' not in speaker:\n", " lines.append( {'Speaker': remove_paren(speaker).strip(),\n", " 'Dialogue': remove_paren(dialogue).strip() } )\n", " \n", " speaker = remove_spaces(row.strip())\n", " dialogue = ''\n", " elif ' ' in row:\n", " dialogue = dialogue + ' ' + remove_spaces(row)\n", "lines.append( {'Speaker': remove_paren(speaker).strip(),\n", " 'Dialogue': remove_paren(dialogue).strip() } )\n", "\n", 
"roles = defaultdict(int)\n", "\n", "for line in lines:\n", " speaker = line['Speaker']\n", " roles[speaker] = roles[speaker] + 1\n", "\n", "toy_story_df = pd.DataFrame(lines[1:])\n", "toy_story_df.head()\n", "\n", "toy_story_df.Speaker.value_counts()\n", "\n", "def what_sex(speaker):\n", " if speaker in [\"SID'S MOM\", 'MRS. DAVIS', 'HANNAH', 'BO PEEP']:\n", " return 'Female'\n", " return 'Male'\n", "\n", "toy_story_df['Sex'] = toy_story_df['Speaker'].apply(what_sex)\n", "\n", "sex_df = toy_story_df.groupby('Sex').size()\n", "sex_df.plot(kind='bar')\n", "sex_df\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def word_count(dialogue):\n", " return len(dialogue.split())\n", "\n", "toy_story_df['Word Count'] = toy_story_df['Dialogue'].apply(word_count)\n", "\n", "word_df = toy_story_df.groupby('Sex')['Word Count'].sum()\n", "word_df.plot(kind='bar')\n", "word_df" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1+" } }, "nbformat": 4, "nbformat_minor": 0 } ================================================ FILE: tools/resolv.conf ================================================ nameserver 162.105.129.26 nameserver 162.105.129.27 ================================================ FILE: tools/sources.list ================================================ deb https://mirrors.ustc.edu.cn/ubuntu/ xenial main restricted universe multiverse ================================================ FILE: tools/start_jupyter.sh ================================================ #!/bin/sh # # this script should be placed in basefs/home/jupyter # # This next line determines what user the script runs as. 
DAEMON_USER=root # settings for docklet worker DAEMON=`which jupyterhub-singleuser` DAEMON_NAME=jupyter # The process ID of the script when it runs is stored here: PIDFILE=/home/jupyter/$DAEMON_NAME.pid RUN_DIR=/root #export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games #export HOME=/home #export SHELL=/bin/bash #export LOGNAME=root # JPY_API_TOKEN is needed by jupyterhub-singleuser # it will send this token in request header to hub-api-url for authorization # but we don't use this by now export JPY_API_TOKEN=not-use # user for this notebook USER=root # port to start service PORT=10000 # cookie name to get from http request and send to hub_api_url for authorization COOKIE_NAME=docklet-jupyter-cookie # base url of this server. client will use this url for request BASE_URL=/workspace/$USER # prefix for login and logout HUB_PREFIX=/jupyter # URL for authorising cookie HUB_API_URL=http://192.168.192.64:9000/jupyter # IP for listening request IP=0.0.0.0 [ -f /home/jupyter/jupyter.config ] && . /home/jupyter/jupyter.config [ -z $IP ] && IP=$(ip address show dev eth0 | grep -P -o '10\.[0-9]*\.[0-9]*\.[0-9]*(?=/)') DAEMON_OPTS="--no-browser --user=$USER --port=$PORT --cookie-name=$COOKIE_NAME --base-url=$BASE_URL --hub-prefix=$HUB_PREFIX --hub-api-url=$HUB_API_URL --ip=$IP --debug" . /lib/lsb/init-functions ########### start-stop-daemon --start --oknodo --background -d $RUN_DIR --pidfile $PIDFILE --make-pidfile --user $DAEMON_USER --chuid $DAEMON_USER --startas $DAEMON -- $DAEMON_OPTS ================================================ FILE: tools/update-UserTable.sh ================================================ #!/bin/bash echo "Backup UserTable..." cp /opt/docklet/global/sys/UserTable.db /opt/docklet/global/sys/UserTable.db.backup sed -i "s/^ beans/# beans/g" ../src/model.py sed -i "s/^ self.beans/# self.beans/g" ../src/model.py echo "Alter UserTable..." 
python3 alterUserTable.py git checkout -- ../ ================================================ FILE: tools/update-basefs.sh ================================================ #!/bin/sh ## WARNING ## This sript is just for my own convenience . my image is ## based on Ubuntu xenial. I did not test it for other distros. ## Therefore this script may not work for your basefs image. ## if [ "$1" != "-y" ] ; then echo "This script will update your basefs. backup it first." echo "then run: $0 -y" exit 1 fi # READ docklet.conf FS_PREFIX=/opt/docklet BASEFS=$FS_PREFIX/local/basefs CONF=../conf/docklet.conf echo "Reading $CONF" if [ -f $CONF ] ; then . $CONF BASEFS=$FS_PREFIX/local/basefs echo "$CONF exit, basefs=$BASEFS" else echo "$CONF not exist, default basefs=$BASEFS" fi if [ ! -d $BASEFS ] ; then echo "Checking $BASEFS: not exist, FAIL" exit 1 else echo "Checking $BASEFS: exist. " fi echo "[*] Copying start_jupyter.sh to $BASEFS/home/jupyter" mkdir -p $BASEFS/home/jupyter cp start_jupyter.sh $BASEFS/home/jupyter echo "" echo "[*] Changing $BASEFS/etc/network/interfaces using static" echo "Original network/interfaces is" cat $BASEFS/etc/network/interfaces | sed 's/^/OLD /' sed -i -- 's/dhcp/static/g' $BASEFS/etc/network/interfaces # setting resolv.conf, use your own resolv.conf for your image echo "[*] Setting $BASEFS/etc/resolv.conf" cp resolv.conf $BASEFS/etc/resolvconf/resolv.conf.d/base echo "[*] Masking console-getty.service" chroot $BASEFS systemctl mask console-getty.service echo "[*] Masking system-journald.service" chroot $BASEFS systemctl mask systemd-journald.service echo "[*] Masking system-logind.service" chroot $BASEFS systemctl mask systemd-logind.service echo "[*] Masking dbus.service" chroot $BASEFS systemctl mask dbus.service echo "[*] Disabling apache2 service(if installed)" if [ -d $BASEFS/etc/apache2 ] ; then chroot $BASEFS update-rc.d apache2 disable fi echo "[*] Disabling ondemand service(if installed)" chroot $BASEFS update-rc.d ondemand disable 
echo "[*] Disabling dbus service(if installed)" chroot $BASEFS update-rc.d dbus disable echo "[*] Disabling mysql service(if installed)" if [ -d $BASEFS/etc/mysql ] ; then chroot $BASEFS update-rc.d mysql disable fi echo "[*] Disabling nginx service(if installed)" if [ -d $BASEFS/etc/nginx ] ; then chroot $BASEFS update-rc.d nginx disable fi echo "[*] Setting worker_processes of nginx to 1(if installed)" [ -f $BASEFS/etc/nginx/nginx.conf ] && sed -i -- 's/worker_processes\ auto/worker_processes\ 1/g' $BASEFS/etc/nginx/nginx.conf echo "[*] Deleting default /etc/nginx/sites-enabled/default" rm -f $BASEFS/etc/nginx/sites-enabled/default echo "[*] Copying vimrc.local to $BASEFS/etc/vim/" cp vimrc.local $BASEFS/etc/vim echo "[*] Copying pip.conf to $BASEFS/root/.pip/" mkdir -p $BASEFS/root/.pip/ cp pip.conf $BASEFS/root/.pip echo "[*] Copying npmrc to $BASEFS/root/.npmrc" cp npmrc $BASEFS/root/.npmrc echo "[*] Copying DOCKLET_NOTES.txt to $BASEFS/root/DOCKLET_NOTES.txt" cp DOCKLET_NOTES.txt $BASEFS/root/ echo "[*] Updating USER/.ssh/config to disable StrictHostKeyChecking" for f in $FS_PREFIX/global/users/* ; do cat < $f/ssh/config Host * StrictHostKeyChecking no UserKnownHostsFile=/dev/null EOF done echo "[*] Generating $BASEFS/home/spark/sbin/dl_{start|stop}_spark.sh for Spark" if [ -d $BASEFS/home/spark/sbin ] ; then cp dl_*_spark.sh $BASEFS/home/spark/sbin fi echo "[*] Generating $BASEFS/root/{R|python}_demo.ipynb" if [ -d $BASEFS/root/ ] ; then cp R_demo.ipynb python_demo.ipynb $BASEFS/root/ fi ================================================ FILE: tools/update_baseurl.sh ================================================ #!/bin/sh toolsdir=${0%/*} DOCKLET_TOOLS=$(cd $toolsdir; pwd) DOCKLET_HOME=${DOCKLET_TOOLS%/*} DOCKLET_CONF=$DOCKLET_HOME/conf . 
$DOCKLET_CONF/docklet.conf masterip=$(ifconfig ${NETWORK_DEVICE} | awk '/inet/ {print $2}' | awk -F: '{print $2}' | head -1) cons=$(ls /var/lib/lxc) echo ${masterip} for i in ${cons} do sed -i "s/BASE_URL=\/go/BASE_URL=\/${masterip}\/go/g" /var/lib/lxc/${i}/rootfs/home/jupyter/jupyter.config running=$(lxc-info -n ${i} | grep RUNNING) if [ "${running}" != '' ] then echo "Stop ${i}..." lxc-stop -k -n ${i} echo "Start ${i}..." lxc-start -n ${i} lxc-attach -n ${i} -- su -c /home/jupyter/start_jupyter.sh lxc-attach -n ${i} -- service ssh start fi done ================================================ FILE: tools/update_con_network.py ================================================ import sys,os sys.path.append("../src/") import env,requests if len(sys.argv) < 2: print("Please enter USER_IP") exit() userpoint = "http://" + sys.argv[1] + ":" + str(env.getenv('USER_PORT')) auth_key = env.getenv('AUTH_KEY') def post_to_user(url = '/', data={}): return requests.post(userpoint+url,data=data).json() cons = os.listdir('/var/lib/lxc') for con in cons: print("Update %s..."%(con)) namesplit = con.split('-') user = namesplit[0] res = post_to_user('/user/uid/',{'username':user,'auth_key':auth_key}) try: configfile = open('/var/lib/lxc/'+con+'/config','r') except: continue context = configfile.read() configfile.close() #print(context) #print(res['uid']) context = context.replace("docklet-br","docklet-br-"+str(res['uid'])) newfile = open('/var/lib/lxc/'+con+'/config','w') newfile.write(context) newfile.close() ================================================ FILE: tools/update_v0.3.2.py ================================================ import json def isexist(quotas, key): flag = False for quota in quotas: if quota['name'] == key: flag = True return flag return flag fspath = '/opt/docklet' groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() for group in groups: group['quotas']['portmapping'] = 8 group['quotas']['input_rate_limit'] = 
10000 group['quotas']['output_rate_limit'] = 10000 groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() quotafile = open(fspath+"/global/sys/quotainfo",'r') quotas = json.loads(quotafile.read()) quotafile.close() if not isexist(quotas['quotainfo'], 'portmapping'): quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'}) if not isexist(quotas['quotainfo'], 'input_rate_limit'): quotas['quotainfo'].append({'name':'input_rate_limit', 'hint':'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'}) if not isexist(quotas['quotainfo'], 'output_rate_limit'): quotas['quotainfo'].append({'name':'output_rate_limit', 'hint':'the egress speed of the network, number of kbps. 0 means the rate are unlimited.'}) quotafile = open(fspath+"/global/sys/quotainfo",'w') quotafile.write(json.dumps(quotas)) quotafile.close() ================================================ FILE: tools/upgrade.py ================================================ #!/usr/bin/python3 import os, json, sys sys.path.append("../src/") from model import db, User from lvmtool import sys_run fspath="/opt/docklet" def update_quotainfo(): if not os.path.exists(fspath+"/global/sys/quotainfo"): print("quotainfo file not exists, please run docklet to init it") return False quotafile = open(fspath+"/global/sys/quotainfo", 'r') quotas = json.loads(quotafile.read()) quotafile.close() if type(quotas) is list: new_quotas = {} new_quotas['default'] = 'foundation' new_quotas['quotainfo'] = quotas quotas = new_quotas print("change the type of quotafile from list to dict") keys = [] for quota in quotas['quotainfo']: keys.append(quota['name']) if 'cpu' not in keys: quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'}) if 'memory' not in keys: quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB, e.g. 
4000'}) if 'disk' not in keys: quotas['quotainfo'].append({'name':'disk', 'hint':'the disk quota, number of MB, e.g. 4000'}) if 'data' not in keys: quotas['quotainfo'].append({'name':'data', 'hint':'the quota of data space, number of GB, e.g. 100'}) if 'image' not in keys: quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can have, e.g. 8'}) if 'idletime' not in keys: quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'}) if 'vnode' not in keys: quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'}) print("quotainfo updated") quotafile = open(fspath+"/global/sys/quotainfo", 'w') quotafile.write(json.dumps(quotas)) quotafile.close() if not os.path.exists(fspath+"/global/sys/quota"): print("quota file not exists, please run docklet to init it") return False groupfile = open(fspath+"/global/sys/quota",'r') groups = json.loads(groupfile.read()) groupfile.close() for group in groups: if 'cpu' not in group['quotas'].keys(): group['quotas']['cpu'] = "4" if 'memory' not in group['quotas'].keys(): group['quotas']['memory'] = "2000" if 'disk' not in group['quotas'].keys(): group['quotas']['disk'] = "2000" if 'data' not in group['quotas'].keys(): group['quotas']['data'] = "100" if 'image' not in group['quotas'].keys(): group['quotas']['image'] = "10" if 'idletime' not in group['quotas'].keys(): group['quotas']['idletime'] = "24" if 'vnode' not in group['quotas'].keys(): group['quotas']['vnode'] = "8" print("quota updated") groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() def name_error(): quotafile = open(fspath+"/global/sys/quotainfo", 'r') quotas = json.loads(quotafile.read()) quotafile.close() if quotas['default'] == 'fundation': quotas['default'] = 'foundation' quotafile = open(fspath+"/global/sys/quotainfo",'w') quotafile.write(json.dumps(quotas)) quotafile.close() groupfile = 
open(fspath+"/global/sys/quota", 'r') groups = json.loads(groupfile.read()) groupfile.close() for group in groups: if group['name'] == 'fundation': group['name'] = 'foundation' groupfile = open(fspath+"/global/sys/quota",'w') groupfile.write(json.dumps(groups)) groupfile.close() users = User.query.filter_by(user_group = 'fundation').all() for user in users: user.user_group = 'foundation' db.session.commit() def allquota(): try: quotafile = open(fspath+"/global/sys/quota", 'r') quotas = json.loads(quotafile.read()) quotafile.close() return quotas except Exception as e: print(e) return None def quotaquery(quotaname,quotas): for quota in quotas: if quota['name'] == quotaname: return quota['quotas'] return None def enable_gluster_quota(): conffile=open("../conf/docklet.conf",'r') conf=conffile.readlines() conffile.close() enable = False volume_name = "" for line in conf: if line.startswith("DATA_QUOTA"): keyvalue = line.split("=") if len(keyvalue) < 2: continue key = keyvalue[0].strip() value = keyvalue[1].strip() if value == "YES": enable = True break for line in conf: if line.startswith("DATA_QUOTA_CMD"): keyvalue = line.split("=") if len(keyvalue) < 2: continue volume_name = keyvalue[1].strip() if not enable: print("don't need to enable the quota") return users = User.query.all() quotas = allquota() if quotaquery == None: print("quota info not found") return sys_run("gluster volume quota %s enable" % volume_name) for user in users: quota = quotaquery(user.user_group, quotas) nfs_quota = quota['data'] if nfs_quota == None: print("data quota should be set") return nfspath = "/users/%s/data" % user.username sys_run("gluster volume quota %s limit-usage %s %sGB" % (volume_name,nfspath,nfs_quota)) def update_image(): private_imagepath = fspath + "/global/images/private/" public_imagepath = fspath + "/global/images/public/" userdirs = os.listdir(private_imagepath) for userdir in userdirs: if os.path.isdir(private_imagepath+userdir+"/"): currentdir = 
private_imagepath+userdir+"/" images = os.listdir(currentdir) for image in images: if os.path.isdir(currentdir+image+"/"): try: sys_run("tar -cvf %s -C %s ." % (currentdir+image+".tz",currentdir+image)) #sys_run("rm -rf %s" % currentdir+image) except Exception as e: print(e) userdirs = os.listdir(public_imagepath) for userdir in userdirs: if os.path.isdir(public_imagepath+userdir+"/"): currentdir = public_imagepath+userdir+"/" images = os.listdir(currentdir) for image in images: if os.path.isdir(currentdir+image+"/"): try: sys_run("tar -cvf %s -C %s ." % (currentdir+image+".tz",currentdir+image)) #sys_run("rm -rf %s" % currentdir+image) except Exception as e: print(e) if __name__ == '__main__': # update_quotainfo() if "fix-name-error" in sys.argv: name_error() # enable_gluster_quota() if "update-image" in sys.argv: update_image() ================================================ FILE: tools/upgrade_file2db.py ================================================ import sys sys.path.append("../src/") import os,json from datetime import datetime from model import db, VCluster, Container, PortMapping, Image, BillingHistory timeFormat = "%Y-%m-%d %H:%M:%S" dockletPath = "/opt/docklet/global" usersdir = dockletPath + "/users/" try: VCluster.query.all() except Exception as err: print("Create database...") db.create_all() print("Update vcluster...") for user in os.listdir(usersdir): tmppath = usersdir+user+"/clusters/" if not os.path.exists(tmppath): continue print("Update User: "+str(user)) clusterfiles = os.listdir(tmppath) for cluname in clusterfiles: cluFile = open(tmppath+cluname,"r") cluinfo = json.loads(cluFile.read()) vcluster = VCluster(cluinfo['clusterid'],cluname,user,cluinfo['status'],cluinfo['size'],cluinfo['nextcid'],cluinfo['proxy_server_ip'],cluinfo['proxy_public_ip']) vcluster.create_time = datetime.strptime(cluinfo['create_time'],timeFormat) vcluster.start_time = cluinfo['start_time'] for coninfo in cluinfo['containers']: lastsavet = 
datetime.strptime(coninfo['lastsave'],timeFormat) con = Container(coninfo['containername'], coninfo['hostname'], coninfo['ip'], coninfo['host'], coninfo['image'], lastsavet, coninfo['setting']) vcluster.containers.append(con) for pminfo in cluinfo['port_mapping']: pm = PortMapping(pminfo['node_name'], pminfo['node_ip'], int(pminfo['node_port']), int(pminfo['host_port'])) vcluster.port_mapping.append(pm) if "billing_history" in cluinfo.keys(): for nodename in cluinfo['billing_history'].keys(): bhinfo = cluinfo['billing_history'][nodename] bh = BillingHistory(nodename,bhinfo['cpu'],bhinfo['mem'],bhinfo['disk'],bhinfo['port']) vcluster.billing_history.append(bh) try: db.session.add(vcluster) db.session.commit() except Exception as err: print(err) cluFile.close() print("Update Images...") for shareStr in ['private/','public/']: print("Update "+shareStr+" Images...") for user in os.listdir(dockletPath+"/images/"+shareStr): print("Update User: "+user) tmppath = dockletPath+"/images/"+shareStr+user+"/" files = os.listdir(tmppath) images = [] for file in files: if file[0] == "." 
or file[-3] != ".": continue images.append(file[:-3]) for img in images: infofile = open(tmppath+"."+img+".info","r") imginfo = infofile.read().split('\n') infofile.close() desfile = open(tmppath+"."+img+".description","r") desinfo = desfile.read() dbimage = Image.query.filter_by(imagename=img,ownername=user).first() if dbimage is None: dbimage = Image(img,False,False,user,desinfo) dbimage.create_time = datetime.strptime(imginfo[0],timeFormat) if shareStr == 'public/': dbimage.hasPublic = True else: dbimage.hasPrivate = True try: db.session.add(dbimage) db.session.commit() except Exception as err: print(err) print("Finished!") ================================================ FILE: tools/vimrc.local ================================================ syntax on set smarttab expandtab sw=4 ts=4 set sm ai set hlsearch set wildchar= wildmenu wildmode=full set enc=utf-8 set fileencoding=utf-8 set fileencodings=utf-8,cp936,euc-cn,ascii filetype indent on ================================================ FILE: user/stopreqmgr.py ================================================ import threading, time from httplib2 import Http from urllib.parse import urlencode from queue import Queue from utils import tools, env from utils.log import logger masterips = env.getenv("MASTER_IPS").split(",") G_masterips = [] for masterip in masterips: G_masterips.append(masterip.split("@")[0] + ":" + str(env.getenv("MASTER_PORT"))) # send http request to master def request_master(url,data): global G_masterips #logger.info("master_ip:"+str(G_masterip)) header = {'Content-Type':'application/x-www-form-urlencoded'} http = Http() for masterip in G_masterips: [resp,content] = http.request("http://"+masterip+url,"POST",urlencode(data),headers = header) logger.info("response from master:"+content.decode('utf-8')) class StopAllReqMgr(threading.Thread): def __init__(self, maxsize=100, interval=1): threading.Thread.__init__(self) self.thread_stop = False self.interval = 1 self.q = Queue(maxsize=maxsize) def 
add_request(self,username): self.q.put(username) def run(self): while not self.thread_stop: username = self.q.get() logger.info("The beans of User(" + str(username) + ") are less than or equal to zero, all his or her vclusters will be stopped.") auth_key = env.getenv('AUTH_KEY') form = {'username':username, 'auth_key':auth_key} request_master("/cluster/stopall/",form) self.q.task_done() time.sleep(self.interval) def stop(self): self.thread_stop = True return ================================================ FILE: user/user.py ================================================ #!/usr/bin/python3 import json import os import getopt import sys, inspect this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])) src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"..", "src"))) if src_folder not in sys.path: sys.path.insert(0, src_folder) from utils import tools, env config = env.getenv("CONFIG") tools.loadenv(config) # must first init loadenv from utils.log import initlogging initlogging("docklet-user") from utils.log import logger from flask import Flask, request, session, render_template, redirect, send_from_directory, make_response, url_for, abort from functools import wraps from master import userManager,beansapplicationmgr, notificationmgr, lockmgr import threading,traceback from utils.model import User,db from httplib2 import Http from urllib.parse import urlencode from master.settings import settings from master.bugreporter import send_bug_mail from stopreqmgr import StopAllReqMgr external_login = env.getenv('EXTERNAL_LOGIN') if(external_login == 'TRUE'): from userDependence import external_auth app = Flask(__name__) def login_required(func): @wraps(func) def wrapper(*args, **kwargs): global G_usermgr logger.info ("get request, path: %s" % request.path) token = request.form.get("token", None) if (token == None): return json.dumps({'success':'false', 'message':'user or key is null'}) cur_user = 
G_usermgr.auth_token(token) if (cur_user == None): return json.dumps({'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) return func(cur_user, cur_user.username, request.form, *args, **kwargs) return wrapper def auth_key_required(func): @wraps(func) def wrapper(*args, **kwargs): key_1 = env.getenv('AUTH_KEY') key_2 = request.form.get("auth_key",None) #logger.info(str(ip) + " " + str(G_userip)) if key_2 is not None and key_1 == key_2: return func(*args, **kwargs) else: return json.dumps({'success':'false','message': 'auth_key is required!'}) return wrapper @app.route("/login/", methods=['POST']) def login(): global G_usermgr logger.info("handle request : user login") user = request.form.get("user", None) key = request.form.get("key", None) userip = request.form.get("ip", "") if user == None or key == None: return json.dumps({'success': 'false', 'message':'user or key is null'}) auth_result = G_usermgr.auth(user,key,userip) if auth_result['success'] == 'false': logger.info("%s login failed" % user) return json.dumps({'success':'false', 'message':auth_result['reason']}) logger.info("%s login success" % user) return json.dumps({'success':'true', 'action':'login', 'data': auth_result['data']}) @app.route('/external_login/', methods=['POST']) def external_login(): global G_usermgr logger.info("handle request : external user login") userip = request.form.get("ip", "") try: result = G_usermgr.auth_external(request.form,userip) return json.dumps(result) except: result = {'success':'false', 'reason':'Something wrong happened when auth an external account'} return json.dumps(result) @app.route("/register/", methods=['POST']) def register(): global G_usermgr if request.form.get('activate', None) == None: logger.info ("handle request : user register") username = request.form.get('username', '') password = request.form.get('password', '') email = request.form.get('email', '') description = request.form.get('description','') if (username == '' or 
password == '' or email == ''): return json.dumps({'success':'false'}) newuser = G_usermgr.newuser() newuser.username = request.form.get('username') newuser.password = request.form.get('password','') newuser.e_mail = request.form.get('email','') newuser.student_number = request.form.get('studentnumber','') newuser.department = request.form.get('department','') newuser.nickname = request.form.get('truename','') newuser.truename = request.form.get('truename','') newuser.description = request.form.get('description','') newuser.status = "init" newuser.auth_method = "local" result = G_usermgr.register(user = newuser) return json.dumps(result) else: logger.info ("handle request, user activating") token = request.form.get("token", None) if (token == None): return json.dumps({'success':'false', 'message':'user or key is null'}) cur_user = G_usermgr.auth_token(token) if (cur_user == None): return json.dumps({'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'}) newuser = G_usermgr.newuser() newuser.username = cur_user.username newuser.nickname = cur_user.truename newuser.password = cur_user.password newuser.status = 'applying' newuser.user_group = cur_user.user_group newuser.auth_method = cur_user.auth_method newuser.e_mail = request.form.get('email','') newuser.student_number = request.form.get('studentnumber', '') newuser.department = request.form.get('department', '') newuser.truename = request.form.get('truename', '') newuser.tel = request.form.get('tel', '') newuser.description = request.form.get('description', '') result = G_usermgr.register(user = newuser) userManager.send_remind_activating_email(newuser.username) return json.dumps(result) @app.route("/authtoken/", methods=['POST']) @login_required def auth_token(cur_user, user, form): logger.info("authing") req = json.dumps({'success':'true','username':cur_user.username,'beans':cur_user.beans}) logger.info("auth success") return req @app.route("/user/modify/", methods=['POST']) 
@login_required
def modify_user(cur_user, user, form):
    # Admin-level modification of a user record.
    global G_usermgr
    logger.info("handle request: user/modify/")
    result = G_usermgr.modify(newValue = form, cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/groupModify/", methods=['POST'])
@login_required
def groupModify_user(cur_user, user, form):
    # Modify a user group. The quota file is shared state, so serialize on
    # its lock; release in finally so an exception cannot leave it held.
    global G_usermgr
    logger.info("handle request: user/groupModify/")
    G_lockmgr.acquire('__quotafile')
    try:
        result = G_usermgr.groupModify(newValue = form, cur_user = cur_user)
    finally:
        G_lockmgr.release('__quotafile')
    return json.dumps(result)

@app.route("/user/query/", methods=['POST'])
@login_required
def query_user(cur_user, user, form):
    # Query a user record: by explicit ID when given, otherwise the caller.
    # On success the raw record token is expanded for display.
    global G_usermgr
    logger.info("handle request: user/query/")
    if (form.get("ID", None) != None):
        result = G_usermgr.query(ID = form.get("ID"), cur_user = cur_user)
    else:
        result = G_usermgr.query(username = user, cur_user = cur_user)
    if (result.get('success', None) == None or result.get('success', None) == "false"):
        return json.dumps(result)
    else:
        result = G_usermgr.queryForDisplay(user = result['token'])
        return json.dumps(result)

@app.route("/user/add/", methods=['POST'])
@login_required
def add_user(cur_user, user, form):
    # Admin creates a user directly in 'normal' (already-activated) status.
    global G_usermgr
    logger.info("handle request: user/add/")
    user = G_usermgr.newuser(cur_user = cur_user)
    user.username = form.get('username', None)
    user.password = form.get('password', None)
    user.e_mail = form.get('e_mail', '')
    user.status = "normal"
    result = G_usermgr.register(user = user, cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/groupadd/", methods=['POST'])
@login_required
def groupadd_user(cur_user, user, form):
    # Add a user group; serialized on the quota-file lock.
    global G_usermgr
    logger.info("handle request: user/groupadd/")
    G_lockmgr.acquire('__quotafile')
    try:
        result = G_usermgr.groupadd(form = form, cur_user = cur_user)
    finally:
        G_lockmgr.release('__quotafile')
    return json.dumps(result)

@app.route("/user/chdefault/", methods=['POST'])
@login_required
def chdefault(cur_user, user, form):
    # Change the default group; serialized on the quota-file lock.
    global G_usermgr
    logger.info("handle request: user/chdefault/")
    G_lockmgr.acquire('__quotafile')
    try:
        result = G_usermgr.change_default_group(form = form, cur_user = cur_user)
    finally:
        G_lockmgr.release('__quotafile')
    return json.dumps(result)

@app.route("/user/quotaadd/", methods=['POST'])
@login_required
def quotaadd_user(cur_user, user, form):
    # Add a quota entry; serialized on the quota-file lock.
    global G_usermgr
    logger.info("handle request: user/quotaadd/")
    G_lockmgr.acquire('__quotafile')
    try:
        result = G_usermgr.quotaadd(form = form, cur_user = cur_user)
    finally:
        G_lockmgr.release('__quotafile')
    return json.dumps(result)

@app.route("/user/groupdel/", methods=['POST'])
@login_required
def groupdel_user(cur_user, user, form):
    # Delete a user group by name; serialized on the quota-file lock.
    global G_usermgr
    logger.info("handle request: user/groupdel/")
    G_lockmgr.acquire('__quotafile')
    try:
        result = G_usermgr.groupdel(name = form.get('name', None), cur_user = cur_user)
    finally:
        G_lockmgr.release('__quotafile')
    return json.dumps(result)

@app.route("/user/data/", methods=['POST'])
@login_required
def data_user(cur_user, user, form):
    # List all users (admin view).
    global G_usermgr
    logger.info("handle request: user/data/")
    result = G_usermgr.userList(cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/groupNameList/", methods=['POST'])
@login_required
def groupNameList_user(cur_user, user, form):
    # List group names only.
    global G_usermgr
    logger.info("handle request: user/groupNameList/")
    result = G_usermgr.groupListName(cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/groupList/", methods=['POST'])
@login_required
def groupList_user(cur_user, user, form):
    # List groups with their details.
    global G_usermgr
    logger.info("handle request: user/groupList/")
    result = G_usermgr.groupList(cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/groupQuery/", methods=['POST'])
@login_required
def groupQuery_user(cur_user, user, form):
    # Query a single group by name.
    global G_usermgr
    logger.info("handle request: user/groupQuery/")
    result = G_usermgr.groupQuery(name = form.get("name"), cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/selfQuery/", methods=['POST'])
@login_required
def selfQuery_user(cur_user, user, form):
    # Query the caller's own record.
    global G_usermgr
    logger.info("handle request: user/selfQuery/")
    result = G_usermgr.selfQuery(cur_user = cur_user)
    return json.dumps(result)

@app.route("/master/user/recoverinfo/", methods=['POST'])
@auth_key_required
def get_master_recoverinfo():
    # Inter-master recovery info for one user.
    # Fix: a lookup miss used to crash with AttributeError on None; return a
    # clean error response instead.
    username = request.form.get("username",None)
    if username is None:
        return json.dumps({'success':'false', 'message':'username field is required.'})
    user = User.query.filter_by(username=username).first()
    if user is None:
        return json.dumps({'success':'false', 'message':'user does not exist.'})
    return json.dumps({'success':'true', 'uid':user.id, 'email':user.e_mail, 'groupname':user.user_group})

@app.route("/master/user/groupinfo/", methods=['POST'])
@auth_key_required
def get_master_groupinfo():
    # Return the group/quota definitions from the shared quota file.
    # 'with' guarantees the file is closed even if the JSON parse fails.
    fspath = env.getenv('FS_PREFIX')
    with open(fspath+"/global/sys/quota",'r') as groupfile:
        groups = json.loads(groupfile.read())
    return json.dumps({'success':'true', 'groups':json.dumps(groups)})

@app.route("/master/user/usageRelease/", methods=['POST'])
@auth_key_required
def usageRelease_master():
    # Inter-master release of a user's cpu/memory/disk usage accounting.
    global G_usermgr
    logger.info("handle request: /master/user/usageRelease/")
    form = request.form
    user = form.get("username",None)
    cur_user = User.query.filter_by(username=user).first()
    if user is None or cur_user is None:
        return json.dumps({'success':'false', 'message':'Null username field or user does not exist.'})
    G_lockmgr.acquire('__usage_'+str(user))
    try:
        result = G_usermgr.usageRelease(cur_user = cur_user, cpu = form.get('cpu'), memory = form.get('memory'), disk = form.get('disk'))
    finally:
        G_lockmgr.release('__usage_'+str(user))
    return json.dumps(result)

@app.route("/user/selfModify/", methods=['POST'])
@login_required
def selfModify_user(cur_user, user, form):
    # Modify the caller's own record.
    global G_usermgr
    logger.info("handle request: user/selfModify/")
    result = G_usermgr.selfModify(cur_user = cur_user, newValue = form)
    return json.dumps(result)

@app.route("/user/usageQuery/" , methods=['POST'])
@login_required
def usageQuery_user(cur_user, user, form):
    # Query the caller's resource usage.
    global G_usermgr
    logger.info("handle request: user/usageQuery/")
    result = G_usermgr.usageQuery(cur_user = cur_user)
    return json.dumps(result)

@app.route("/user/usageInc/", methods=['POST'])
@login_required
def usageInc_user(cur_user, user, form):
    # Increase the caller's usage accounting; per-user lock with finally.
    global G_usermgr
    logger.info("handle request: user/usageInc/")
    setting = form.get('setting')
    G_lockmgr.acquire('__usage_'+str(user))
    try:
        result = G_usermgr.usageInc(cur_user = cur_user, modification = json.loads(setting))
    finally:
        G_lockmgr.release('__usage_'+str(user))
    return json.dumps(result)

@app.route("/user/usageRelease/", methods=['POST'])
@login_required
def usageRelease_user(cur_user, user, form):
    # Release usage accounting. Fix: log label said "user/usageInc/".
    global G_usermgr
    logger.info("handle request: user/usageRelease/")
    G_lockmgr.acquire('__usage_'+str(user))
    try:
        result = G_usermgr.usageRelease(cur_user = cur_user, cpu = form.get('cpu'), memory = form.get('memory'), disk = form.get('disk'))
    finally:
        G_lockmgr.release('__usage_'+str(user))
    return json.dumps(result)

@app.route("/user/usageRecover/", methods=['POST'])
@login_required
def usageRecover_user(cur_user, user, form):
    # Recover usage accounting. Fix: log label said "user/usageInc/".
    global G_usermgr
    logger.info("handle request: user/usageRecover/")
    G_lockmgr.acquire('__usage_'+str(user))
    try:
        result = G_usermgr.usageRecover(cur_user = cur_user, modification = json.loads(form.get('setting')))
    finally:
        G_lockmgr.release('__usage_'+str(user))
    return json.dumps(result)

@app.route("/user/lxcsettingList/", methods=['POST'])
@login_required
def lxcsettingList_user(cur_user, user, form):
    # List LXC container settings.
    global G_usermgr
    logger.info("handle request: user/lxcsettingList/")
    result = G_usermgr.lxcsettingList(cur_user = cur_user, form = form)
    return json.dumps(result)

@app.route("/user/chlxcsetting/", methods=['POST'])
@login_required
def chlxcsetting_user(cur_user, user, form):
    # Change LXC container settings; serialized on the lxcsetting lock.
    global G_usermgr
    logger.info("handle request: user/chlxcsetting/")
    G_lockmgr.acquire('__lxcsetting')
    try:
        result = G_usermgr.chlxcsetting(cur_user = cur_user, form = form)
    finally:
        G_lockmgr.release('__lxcsetting')
    return json.dumps(result)

@app.route("/settings/list/", methods=['POST'])
@login_required
def settings_list(cur_user, user, form):
    # List admin-visible system settings.
    return json.dumps(settings.list(user_group = 'admin'))

@app.route("/settings/update/", methods=['POST'])
@login_required
def settings_update(cur_user, user, form):
    # Update system settings. Params renamed to the (cur_user, user, form)
    # convention used by every other login_required view — the decorator
    # passes them positionally, so callers are unaffected.
    newSetting = {}
    newSetting['OPEN_REGISTRY'] = form.get('OPEN_REGISTRY','')
    newSetting['APPROVAL_RBT'] = form.get('APPROVAL_RBT','')
    newSetting['ADMIN_EMAIL_ADDRESS'] = form.get('ADMIN_EMAIL_ADDRESS', '')
    newSetting['EMAIL_FROM_ADDRESS'] = form.get('EMAIL_FROM_ADDRESS', '')
    return json.dumps(settings.update(user_group = 'admin', newSetting = newSetting))

@app.route("/notification/list/", methods=['POST'])
@login_required
def list_notifications(cur_user, user, form):
    # List notifications visible to the caller.
    global G_notificationmgr
    logger.info("handle request: notification/list/")
    result = G_notificationmgr.list_notifications(cur_user=cur_user, form=form)
    return json.dumps(result)

@app.route("/notification/create/", methods=['POST'])
@login_required
def create_notification(cur_user, user, form):
    # Create a notification; serialized on the notification lock.
    global G_notificationmgr
    logger.info("handle request: notification/create/")
    G_lockmgr.acquire('__notification')
    try:
        result = G_notificationmgr.create_notification(cur_user=cur_user, form=form)
    finally:
        G_lockmgr.release('__notification')
    return json.dumps(result)

@app.route("/notification/modify/", methods=['POST'])
@login_required
def modify_notification(cur_user, user, form):
    # Modify a notification; serialized on the notification lock.
    global G_notificationmgr
    logger.info("handle request: notification/modify/")
    G_lockmgr.acquire('__notification')
    try:
        result = G_notificationmgr.modify_notification(cur_user=cur_user, form=form)
    finally:
        G_lockmgr.release('__notification')
    return json.dumps(result)

@app.route("/notification/delete/", methods=['POST'])
@login_required
def delete_notification(cur_user, user, form):
    # Delete a notification; serialized on the notification lock.
    global G_notificationmgr
    logger.info("handle request: notification/delete/")
    G_lockmgr.acquire('__notification')
    try:
        result = G_notificationmgr.delete_notification(cur_user=cur_user, form=form)
    finally:
        G_lockmgr.release('__notification')
    return json.dumps(result)

@app.route("/notification/query_self/", methods=['POST'])
@login_required
def query_self_notification_simple_infos(cur_user, user, form):
    # Summary info of the caller's own notifications.
    global G_notificationmgr
    logger.info("handle request: notification/query_self/")
    result = G_notificationmgr.query_self_notification_simple_infos(cur_user=cur_user, form=form)
    return json.dumps(result)

@app.route("/notification/query/", methods=['POST'])
@login_required
def query_notification(cur_user, user, form):
    # Query one notification.
    global G_notificationmgr
    logger.info("handle request: notification/query/")
    result = G_notificationmgr.query_notification(cur_user=cur_user, form=form)
    return json.dumps(result)

@app.route("/notification/query/all/", methods=['POST'])
@login_required
def query_self_notifications_infos(cur_user, user, form):
    # Full info of all the caller's notifications.
    global G_notificationmgr
    logger.info("handle request: notification/query/all/")
    result = G_notificationmgr.query_self_notifications_infos(cur_user=cur_user, form=form)
    return json.dumps(result)

@app.route("/bug/report/", methods=['POST'])
@login_required
def report_bug(cur_user, user, form):
    # Forward a bug report by mail on behalf of the caller.
    logger.info("handle request: bug/report")
    result = send_bug_mail(user, form.get("bugmessage", None))
    return json.dumps(result)

@app.route("/billing/beans/", methods=['POST'])
@auth_key_required
def billing_beans():
    # Deduct 'billing' beans from the owner's balance.
    # Fix: the field is parsed AFTER the presence check — previously
    # int(form.get("billing",None)) raised TypeError on a missing field,
    # making the 'is None' check unreachable.
    logger.info("handle request: /billing/beans/")
    form = request.form
    owner_name = form.get("owner_name",None)
    billing_field = form.get("billing",None)
    if owner_name is None or billing_field is None:
        return json.dumps({'success':'false', 'message':'owner_name and beans fields are required.'})
    billing = int(billing_field)
    G_lockmgr.acquire('__beans_'+str(owner_name))
    # update users' tables in database
    owner = User.query.filter_by(username=owner_name).first()
    if owner is None:
        logger.warning("Error!!! Billing User %s doesn't exist!" % (owner_name))
    else:
        oldbeans = owner.beans
        owner.beans -= billing
        # send mail to remind users of their beans if their beans decrease to 0,100,500 and 1000
        if oldbeans > 0 and owner.beans <= 0 or oldbeans >= 100 and owner.beans < 100 or oldbeans >= 500 and owner.beans < 500 or oldbeans >= 1000 and owner.beans < 1000:
            data = {"to_address":owner.e_mail,"username":owner.username,"beans":owner.beans}
            beansapplicationmgr.send_beans_email(owner.e_mail,owner.username,int(owner.beans))
        try:
            db.session.commit()
        except Exception as err:
            db.session.rollback()
            logger.warning(traceback.format_exc())
            logger.warning(err)
            G_lockmgr.release('__beans_'+str(owner_name))
            return json.dumps({'success':'false', 'message':'Fail to wirte to databases.'})
        if owner.beans <= 0:
            # stop all vcluster of the user if his beans are equal to or lower than 0.
            logger.info("The beans of User(" + str(owner) + ") are less than or equal to zero, add request to queue.")
            G_stopreqmgr.add_request(owner.username)
    G_lockmgr.release('__beans_'+str(owner_name))
    return json.dumps({'success':'true'})

@app.route("/beans/<issue>/", methods=['POST'])
@login_required
def beans_apply(cur_user,user,form,issue):
    # Beans application endpoints; the <issue> URL segment selects the
    # sub-action ('apply' or 'applymsgs'). Fix: the route had lost its
    # <issue> converter ("/beans//") while the handler takes an 'issue'
    # argument, so Flask could never bind it.
    global G_applicationmgr
    if issue == 'apply':
        if not cur_user.status == 'normal':
            return json.dumps({'success':'false', 'message':'Fail to apply for beans because your account is locked/not activated. Please:'+ '\n 1. Complete your information and activate your account. \n Or: \n 2.Contact administor for further information'})
        number = form.get("number",None)
        reason = form.get("reason",None)
        if number is None or reason is None:
            return json.dumps({'success':'false', 'message':'Number and reason can\'t be null.'})
        G_lockmgr.acquire('__beansapply_'+str(user))
        try:
            [success,message] = G_applicationmgr.apply(user,number,reason)
        finally:
            G_lockmgr.release('__beansapply_'+str(user))
        if not success:
            return json.dumps({'success':'false', 'message':message})
        else:
            return json.dumps({'success':'true'})
    elif issue == 'applymsgs':
        applymsgs = G_applicationmgr.query(user)
        return json.dumps({'success':'true','applymsgs':applymsgs})
    else:
        return json.dumps({'success':'false','message':'Unsupported URL!'})

@app.route("/beans/admin/<issue>/", methods=['POST'])
@login_required
def beans_admin(cur_user,user,form,issue):
    # Admin side of beans applications: list unread, agree, or reject.
    # Fix: restored the <issue> converter lost from the route.
    global G_applicationmgr
    if issue == 'applymsgs':
        result = G_applicationmgr.queryUnRead(cur_user = cur_user)
        logger.debug("applymsg success")
        return json.dumps(result)
    elif issue == 'agree':
        msgid = form.get("msgid",None)
        username = form.get("username",None)
        if msgid is None or username is None:
            return json.dumps({'success':'false', 'message':'msgid and username can\'t be null.'})
        # Lock order: beans first, then the message — released in reverse.
        G_lockmgr.acquire("__beans_"+str(username))
        G_lockmgr.acquire("__applymsg_"+str(msgid))
        try:
            result = G_applicationmgr.agree(msgid, cur_user = cur_user)
        finally:
            G_lockmgr.release("__applymsg_"+str(msgid))
            G_lockmgr.release("__beans_"+str(username))
        return json.dumps(result)
    elif issue == 'reject':
        msgid = form.get("msgid",None)
        if msgid is None:
            return json.dumps({'success':'false', 'message':'msgid can\'t be null.'})
        G_lockmgr.acquire("__applymsg_"+str(msgid))
        try:
            result = G_applicationmgr.reject(msgid, cur_user = cur_user)
        finally:
            G_lockmgr.release("__applymsg_"+str(msgid))
        return json.dumps(result)
    else:
        return json.dumps({'success':'false', 'message':'Unsupported URL!'})

@app.errorhandler(500)
def internal_server_error(error):
    logger.debug("An internel server error occured")
logger.error(traceback.format_exc()) return json.dumps({'success':'false', 'message':'500 Internal Server Error', 'Unauthorized': 'True'}) if __name__ == '__main__': logger.info('Start Flask...:') try: secret_key_file = open(env.getenv('FS_PREFIX') + '/local/user_secret_key.txt') app.secret_key = secret_key_file.read() secret_key_file.close() except: from base64 import b64encode from os import urandom secret_key = urandom(24) secret_key = b64encode(secret_key).decode('utf-8') app.secret_key = secret_key secret_key_file = open(env.getenv('FS_PREFIX') + '/local/user_secret_key.txt', 'w') secret_key_file.write(secret_key) secret_key_file.close() os.environ['APP_KEY'] = app.secret_key runcmd = sys.argv[0] app.runpath = runcmd.rsplit('/', 1)[0] global G_usermgr global G_notificationmgr global G_sysmgr global G_historymgr global G_applicationmgr global G_lockmgr global G_stopreqmgr fs_path = env.getenv("FS_PREFIX") logger.info("using FS_PREFIX %s" % fs_path) mode = 'recovery' if len(sys.argv) > 1 and sys.argv[1] == "new": mode = 'new' G_lockmgr = lockmgr.LockMgr() G_usermgr = userManager.userManager('root') #if mode == "new": # G_usermgr.initUsage() G_notificationmgr = notificationmgr.NotificationMgr() #userip = env.getenv('USER_IP') userip = "0.0.0.0" logger.info("using USER_IP %s", userip) #userport = env.getenv('USER_PORT') userport = 9100 logger.info("using USER_PORT %d", int(userport)) G_applicationmgr = beansapplicationmgr.ApplicationMgr() approvalrbt = beansapplicationmgr.ApprovalRobot() if(env.getenv("APPROVAL_RBT") == "ON"): approvalrbt .start() logger.info("ApprovalRobot is started.") else: logger.info("ApprovalRobot is not started.") G_stopreqmgr = StopAllReqMgr() G_stopreqmgr.start() # server = http.server.HTTPServer((masterip, masterport), DockletHttpHandler) logger.info("starting user server") app.run(host = userip, port = userport, threaded=True,) ================================================ FILE: web/static/css/docklet.css 
================================================ .btn-outline, .btn-outline-default, .badge-outline, .badge-outline-default, .label-outline, .label-outline-default { border: 1px solid #AAB2BD; background-color: transparent; color: #434A54; } .btn-outline-success, .badge-outline-success, .label-outline-success { border: 1px solid #1C84C6; background-color: transparent; color: #1C84C6; } .btn-outline-warning, .badge-outline-warning, .label-outline-warning { border: 1px solid #F8AC59; background-color: transparent; color: #F8AC59; } .btn-outline-default:hover, .btn-outline:hover { border: 1px solid #AAB2BD; background-color: #AAB2BD; color: #434A54; } .btn-outline-success:hover { border: 1px solid #1C84C6; background-color: #1C84C6; color: #FFFFFF; } .btn-outline-warning:hover { border: 1px solid #F8AC59; background-color: #F8AC59; color: #FFFFFF; } .docklet-red-block{ background-color: #EB4235; color: #FFFFFF; } .docklet-green-block{ background-color: #7DB600; color: #FFFFFF; } .docklet-yellow-block{ background-color: #FABC05; color: #FFFFFF; } .docklet-blue-block{ background-color: #4185F6; color: #FFFFFF; } ================================================ FILE: web/static/dist/css/AdminLTE.css ================================================ /*! 
* AdminLTE v2.3.2 * Author: Almsaeed Studio * Website: Almsaeed Studio * License: Open source - MIT * Please visit http://opensource.org/licenses/MIT for more information !*/ /* * Core: General Layout Style * ------------------------- */ html, body { min-height: 100%; } .layout-boxed html, .layout-boxed body { height: 100%; } body { -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; font-family: 'Source Sans Pro', 'Helvetica Neue', Helvetica, Arial, sans-serif; font-weight: 400; overflow-x: hidden; overflow-y: auto; } /* Layout */ .wrapper { min-height: 100%; position: relative; overflow: hidden; } .wrapper:before, .wrapper:after { content: " "; display: table; } .wrapper:after { clear: both; } .layout-boxed .wrapper { max-width: 1250px; margin: 0 auto; min-height: 100%; box-shadow: 0 0 8px rgba(0, 0, 0, 0.5); position: relative; } .layout-boxed { background: url('../img/boxed-bg.jpg') repeat fixed; } /* * Content Wrapper - contains the main content * ```.right-side has been deprecated as of v2.0.0 in favor of .content-wrapper ``` */ .content-wrapper, .right-side, .main-footer { -webkit-transition: -webkit-transform 0.3s ease-in-out, margin 0.3s ease-in-out; -moz-transition: -moz-transform 0.3s ease-in-out, margin 0.3s ease-in-out; -o-transition: -o-transform 0.3s ease-in-out, margin 0.3s ease-in-out; transition: transform 0.3s ease-in-out, margin 0.3s ease-in-out; margin-left: 230px; z-index: 820; } .layout-top-nav .content-wrapper, .layout-top-nav .right-side, .layout-top-nav .main-footer { margin-left: 0; } @media (max-width: 767px) { .content-wrapper, .right-side, .main-footer { margin-left: 0; } } @media (min-width: 768px) { .sidebar-collapse .content-wrapper, .sidebar-collapse .right-side, .sidebar-collapse .main-footer { margin-left: 0; } } @media (max-width: 767px) { .sidebar-open .content-wrapper, .sidebar-open .right-side, .sidebar-open .main-footer { -webkit-transform: translate(230px, 0); -ms-transform: translate(230px, 0); 
-o-transform: translate(230px, 0); transform: translate(230px, 0); } } .content-wrapper, .right-side { min-height: 100%; background-color: #ecf0f5; z-index: 800; } .main-footer { background: #fff; padding: 15px; color: #444; border-top: 1px solid #d2d6de; } /* Fixed layout */ .fixed .main-header, .fixed .main-sidebar, .fixed .left-side { position: fixed; } .fixed .main-header { top: 0; right: 0; left: 0; } .fixed .content-wrapper, .fixed .right-side { padding-top: 50px; } @media (max-width: 767px) { .fixed .content-wrapper, .fixed .right-side { padding-top: 100px; } } .fixed.layout-boxed .wrapper { max-width: 100%; } body.hold-transition .content-wrapper, body.hold-transition .right-side, body.hold-transition .main-footer, body.hold-transition .main-sidebar, body.hold-transition .left-side, body.hold-transition .main-header > .navbar, body.hold-transition .main-header .logo { /* Fix for IE */ -webkit-transition: none; -o-transition: none; transition: none; } /* Content */ .content { min-height: 250px; padding: 15px; margin-right: auto; margin-left: auto; padding-left: 15px; padding-right: 15px; } /* H1 - H6 font */ h1, h2, h3, h4, h5, h6, .h1, .h2, .h3, .h4, .h5, .h6 { font-family: 'Source Sans Pro', sans-serif; } /* General Links */ a { color: #3c8dbc; } a:hover, a:active, a:focus { outline: none; text-decoration: none; color: #72afd2; } /* Page Header */ .page-header { margin: 10px 0 20px 0; font-size: 22px; } .page-header > small { color: #666; display: block; margin-top: 5px; } /* * Component: Main Header * ---------------------- */ .main-header { position: relative; max-height: 100px; z-index: 1030; } .main-header > .navbar { -webkit-transition: margin-left 0.3s ease-in-out; -o-transition: margin-left 0.3s ease-in-out; transition: margin-left 0.3s ease-in-out; margin-bottom: 0; margin-left: 230px; border: none; min-height: 50px; border-radius: 0; } .layout-top-nav .main-header > .navbar { margin-left: 0; } .main-header #navbar-search-input.form-control { 
background: rgba(255, 255, 255, 0.2); border-color: transparent; } .main-header #navbar-search-input.form-control:focus, .main-header #navbar-search-input.form-control:active { border-color: rgba(0, 0, 0, 0.1); background: rgba(255, 255, 255, 0.9); } .main-header #navbar-search-input.form-control::-moz-placeholder { color: #ccc; opacity: 1; } .main-header #navbar-search-input.form-control:-ms-input-placeholder { color: #ccc; } .main-header #navbar-search-input.form-control::-webkit-input-placeholder { color: #ccc; } .main-header .navbar-custom-menu, .main-header .navbar-right { float: right; } @media (max-width: 991px) { .main-header .navbar-custom-menu a, .main-header .navbar-right a { color: inherit; background: transparent; } } @media (max-width: 767px) { .main-header .navbar-right { float: none; } .navbar-collapse .main-header .navbar-right { margin: 7.5px -15px; } .main-header .navbar-right > li { color: inherit; border: 0; } } .main-header .sidebar-toggle { float: left; background-color: transparent; background-image: none; padding: 15px 15px; font-family: fontAwesome; } .main-header .sidebar-toggle:before { content: "\f0c9"; } .main-header .sidebar-toggle:hover { color: #fff; } .main-header .sidebar-toggle:focus, .main-header .sidebar-toggle:active { background: transparent; } .main-header .sidebar-toggle .icon-bar { display: none; } .main-header .navbar .nav > li.user > a > .fa, .main-header .navbar .nav > li.user > a > .glyphicon, .main-header .navbar .nav > li.user > a > .ion { margin-right: 5px; } .main-header .navbar .nav > li > a > .label { position: absolute; top: 9px; right: 7px; text-align: center; font-size: 9px; padding: 2px 3px; line-height: .9; } .main-header .logo { -webkit-transition: width 0.3s ease-in-out; -o-transition: width 0.3s ease-in-out; transition: width 0.3s ease-in-out; display: block; float: left; height: 50px; font-size: 20px; line-height: 50px; text-align: center; width: 230px; font-family: "Helvetica Neue", Helvetica, Arial, 
sans-serif; padding: 0 15px; font-weight: 300; overflow: hidden; } .main-header .logo .logo-lg { display: block; } .main-header .logo .logo-mini { display: none; } .main-header .navbar-brand { color: #fff; } .content-header { position: relative; padding: 15px 15px 0 15px; } .content-header > h1 { margin: 0; font-size: 24px; } .content-header > h1 > small { font-size: 15px; display: inline-block; padding-left: 4px; font-weight: 300; } .content-header > .breadcrumb { float: right; background: transparent; margin-top: 0; margin-bottom: 0; font-size: 12px; padding: 7px 5px; position: absolute; top: 15px; right: 10px; border-radius: 2px; } .content-header > .breadcrumb > li > a { color: #444; text-decoration: none; display: inline-block; } .content-header > .breadcrumb > li > a > .fa, .content-header > .breadcrumb > li > a > .glyphicon, .content-header > .breadcrumb > li > a > .ion { margin-right: 5px; } .content-header > .breadcrumb > li + li:before { content: '>\00a0'; } @media (max-width: 991px) { .content-header > .breadcrumb { position: relative; margin-top: 5px; top: 0; right: 0; float: none; background: #d2d6de; padding-left: 10px; } .content-header > .breadcrumb li:before { color: #97a0b3; } } .navbar-toggle { color: #fff; border: 0; margin: 0; padding: 15px 15px; } @media (max-width: 991px) { .navbar-custom-menu .navbar-nav > li { float: left; } .navbar-custom-menu .navbar-nav { margin: 0; float: left; } .navbar-custom-menu .navbar-nav > li > a { padding-top: 15px; padding-bottom: 15px; line-height: 20px; } } @media (max-width: 767px) { .main-header { position: relative; } .main-header .logo, .main-header .navbar { width: 100%; float: none; } .main-header .navbar { margin: 0; } .main-header .navbar-custom-menu { float: right; } } @media (max-width: 991px) { .navbar-collapse.pull-left { float: none !important; } .navbar-collapse.pull-left + .navbar-custom-menu { display: block; position: absolute; top: 0; right: 40px; } } /* * Component: Sidebar * 
------------------ */ .main-sidebar, .left-side { position: absolute; top: 0; left: 0; padding-top: 50px; min-height: 100%; width: 230px; z-index: 810; -webkit-transition: -webkit-transform 0.3s ease-in-out, width 0.3s ease-in-out; -moz-transition: -moz-transform 0.3s ease-in-out, width 0.3s ease-in-out; -o-transition: -o-transform 0.3s ease-in-out, width 0.3s ease-in-out; transition: transform 0.3s ease-in-out, width 0.3s ease-in-out; } @media (max-width: 767px) { .main-sidebar, .left-side { padding-top: 100px; } } @media (max-width: 767px) { .main-sidebar, .left-side { -webkit-transform: translate(-230px, 0); -ms-transform: translate(-230px, 0); -o-transform: translate(-230px, 0); transform: translate(-230px, 0); } } @media (min-width: 768px) { .sidebar-collapse .main-sidebar, .sidebar-collapse .left-side { -webkit-transform: translate(-230px, 0); -ms-transform: translate(-230px, 0); -o-transform: translate(-230px, 0); transform: translate(-230px, 0); } } @media (max-width: 767px) { .sidebar-open .main-sidebar, .sidebar-open .left-side { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); -o-transform: translate(0, 0); transform: translate(0, 0); } } .sidebar { padding-bottom: 10px; } .sidebar-form input:focus { border-color: transparent; } .user-panel { position: relative; width: 100%; padding: 10px; overflow: hidden; } .user-panel:before, .user-panel:after { content: " "; display: table; } .user-panel:after { clear: both; } .user-panel > .image > img { width: 100%; max-width: 45px; height: auto; } .user-panel > .info { padding: 5px 5px 5px 15px; line-height: 1; position: absolute; left: 55px; } .user-panel > .info > p { font-weight: 600; margin-bottom: 9px; } .user-panel > .info > a { text-decoration: none; padding-right: 5px; margin-top: 3px; font-size: 11px; } .user-panel > .info > a > .fa, .user-panel > .info > a > .ion, .user-panel > .info > a > .glyphicon { margin-right: 3px; } .sidebar-menu { list-style: none; margin: 0; padding: 0; } 
.sidebar-menu > li { position: relative; margin: 0; padding: 0; } .sidebar-menu > li > a { padding: 12px 5px 12px 15px; display: block; } .sidebar-menu > li > a > .fa, .sidebar-menu > li > a > .glyphicon, .sidebar-menu > li > a > .ion { width: 20px; } .sidebar-menu > li .label, .sidebar-menu > li .badge { margin-top: 3px; margin-right: 5px; } .sidebar-menu li.header { padding: 10px 25px 10px 15px; font-size: 12px; } .sidebar-menu li > a > .fa-angle-left { width: auto; height: auto; padding: 0; margin-right: 10px; margin-top: 3px; } .sidebar-menu li.active > a > .fa-angle-left { -webkit-transform: rotate(-90deg); -ms-transform: rotate(-90deg); -o-transform: rotate(-90deg); transform: rotate(-90deg); } .sidebar-menu li.active > .treeview-menu { display: block; } .sidebar-menu .treeview-menu { display: none; list-style: none; padding: 0; margin: 0; padding-left: 5px; } .sidebar-menu .treeview-menu .treeview-menu { padding-left: 20px; } .sidebar-menu .treeview-menu > li { margin: 0; } .sidebar-menu .treeview-menu > li > a { padding: 5px 5px 5px 15px; display: block; font-size: 14px; } .sidebar-menu .treeview-menu > li > a > .fa, .sidebar-menu .treeview-menu > li > a > .glyphicon, .sidebar-menu .treeview-menu > li > a > .ion { width: 20px; } .sidebar-menu .treeview-menu > li > a > .fa-angle-left, .sidebar-menu .treeview-menu > li > a > .fa-angle-down { width: auto; } /* * Component: Sidebar Mini */ @media (min-width: 768px) { .sidebar-mini.sidebar-collapse .content-wrapper, .sidebar-mini.sidebar-collapse .right-side, .sidebar-mini.sidebar-collapse .main-footer { margin-left: 50px !important; z-index: 840; } .sidebar-mini.sidebar-collapse .main-sidebar { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); -o-transform: translate(0, 0); transform: translate(0, 0); width: 50px !important; z-index: 850; } .sidebar-mini.sidebar-collapse .sidebar-menu > li { position: relative; } .sidebar-mini.sidebar-collapse .sidebar-menu > li > a { margin-right: 0; } 
.sidebar-mini.sidebar-collapse .sidebar-menu > li > a > span { border-top-right-radius: 4px; } .sidebar-mini.sidebar-collapse .sidebar-menu > li:not(.treeview) > a > span { border-bottom-right-radius: 4px; } .sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { padding-top: 5px; padding-bottom: 5px; border-bottom-right-radius: 4px; } .sidebar-mini.sidebar-collapse .sidebar-menu > li:hover > a > span:not(.pull-right), .sidebar-mini.sidebar-collapse .sidebar-menu > li:hover > .treeview-menu { display: block !important; position: absolute; width: 180px; left: 50px; } .sidebar-mini.sidebar-collapse .sidebar-menu > li:hover > a > span { top: 0; margin-left: -3px; padding: 12px 5px 12px 20px; background-color: inherit; } .sidebar-mini.sidebar-collapse .sidebar-menu > li:hover > .treeview-menu { top: 44px; margin-left: 0; } .sidebar-mini.sidebar-collapse .main-sidebar .user-panel > .info, .sidebar-mini.sidebar-collapse .sidebar-form, .sidebar-mini.sidebar-collapse .sidebar-menu > li > a > span, .sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu, .sidebar-mini.sidebar-collapse .sidebar-menu > li > a > .pull-right, .sidebar-mini.sidebar-collapse .sidebar-menu li.header { display: none !important; -webkit-transform: translateZ(0); } .sidebar-mini.sidebar-collapse .main-header .logo { width: 50px; } .sidebar-mini.sidebar-collapse .main-header .logo > .logo-mini { display: block; margin-left: -15px; margin-right: -15px; font-size: 18px; } .sidebar-mini.sidebar-collapse .main-header .logo > .logo-lg { display: none; } .sidebar-mini.sidebar-collapse .main-header .navbar { margin-left: 50px; } } .sidebar-menu, .main-sidebar .user-panel, .sidebar-menu > li.header { white-space: nowrap; overflow: hidden; } .sidebar-menu:hover { overflow: visible; } .sidebar-form, .sidebar-menu > li.header { overflow: hidden; text-overflow: clip; } .sidebar-menu li > a { position: relative; } .sidebar-menu li > a > .pull-right { position: absolute; right: 10px; top: 
50%; margin-top: -7px; } /* * Component: Control sidebar. By default, this is the right sidebar. */ .control-sidebar-bg { position: fixed; z-index: 1000; bottom: 0; } .control-sidebar-bg, .control-sidebar { top: 0; right: -230px; width: 230px; -webkit-transition: right 0.3s ease-in-out; -o-transition: right 0.3s ease-in-out; transition: right 0.3s ease-in-out; } .control-sidebar { position: absolute; padding-top: 50px; z-index: 1010; } @media (max-width: 768px) { .control-sidebar { padding-top: 100px; } } .control-sidebar > .tab-content { padding: 10px 15px; } .control-sidebar.control-sidebar-open, .control-sidebar.control-sidebar-open + .control-sidebar-bg { right: 0; } .control-sidebar-open .control-sidebar-bg, .control-sidebar-open .control-sidebar { right: 0; } @media (min-width: 768px) { .control-sidebar-open .content-wrapper, .control-sidebar-open .right-side, .control-sidebar-open .main-footer { margin-right: 230px; } } .nav-tabs.control-sidebar-tabs > li:first-of-type > a, .nav-tabs.control-sidebar-tabs > li:first-of-type > a:hover, .nav-tabs.control-sidebar-tabs > li:first-of-type > a:focus { border-left-width: 0; } .nav-tabs.control-sidebar-tabs > li > a { border-radius: 0; } .nav-tabs.control-sidebar-tabs > li > a, .nav-tabs.control-sidebar-tabs > li > a:hover { border-top: none; border-right: none; border-left: 1px solid transparent; border-bottom: 1px solid transparent; } .nav-tabs.control-sidebar-tabs > li > a .icon { font-size: 16px; } .nav-tabs.control-sidebar-tabs > li.active > a, .nav-tabs.control-sidebar-tabs > li.active > a:hover, .nav-tabs.control-sidebar-tabs > li.active > a:focus, .nav-tabs.control-sidebar-tabs > li.active > a:active { border-top: none; border-right: none; border-bottom: none; } @media (max-width: 768px) { .nav-tabs.control-sidebar-tabs { display: table; } .nav-tabs.control-sidebar-tabs > li { display: table-cell; } } .control-sidebar-heading { font-weight: 400; font-size: 16px; padding: 10px 0; margin-bottom: 10px; } 
.control-sidebar-subheading { display: block; font-weight: 400; font-size: 14px; } .control-sidebar-menu { list-style: none; padding: 0; margin: 0 -15px; } .control-sidebar-menu > li > a { display: block; padding: 10px 15px; } .control-sidebar-menu > li > a:before, .control-sidebar-menu > li > a:after { content: " "; display: table; } .control-sidebar-menu > li > a:after { clear: both; } .control-sidebar-menu > li > a > .control-sidebar-subheading { margin-top: 0; } .control-sidebar-menu .menu-icon { float: left; width: 35px; height: 35px; border-radius: 50%; text-align: center; line-height: 35px; } .control-sidebar-menu .menu-info { margin-left: 45px; margin-top: 3px; } .control-sidebar-menu .menu-info > .control-sidebar-subheading { margin: 0; } .control-sidebar-menu .menu-info > p { margin: 0; font-size: 11px; } .control-sidebar-menu .progress { margin: 0; } .control-sidebar-dark { color: #b8c7ce; } .control-sidebar-dark, .control-sidebar-dark + .control-sidebar-bg { background: #222d32; } .control-sidebar-dark .nav-tabs.control-sidebar-tabs { border-bottom: #1c2529; } .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a { background: #181f23; color: #b8c7ce; } .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a, .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a:hover, .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a:focus { border-left-color: #141a1d; border-bottom-color: #141a1d; } .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a:hover, .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a:focus, .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a:active { background: #1c2529; } .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li > a:hover { color: #fff; } .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li.active > a, .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li.active > a:hover, .control-sidebar-dark .nav-tabs.control-sidebar-tabs > li.active > a:focus, 
.control-sidebar-dark .nav-tabs.control-sidebar-tabs > li.active > a:active { background: #222d32; color: #fff; } .control-sidebar-dark .control-sidebar-heading, .control-sidebar-dark .control-sidebar-subheading { color: #fff; } .control-sidebar-dark .control-sidebar-menu > li > a:hover { background: #1e282c; } .control-sidebar-dark .control-sidebar-menu > li > a .menu-info > p { color: #b8c7ce; } .control-sidebar-light { color: #5e5e5e; } .control-sidebar-light, .control-sidebar-light + .control-sidebar-bg { background: #f9fafc; border-left: 1px solid #d2d6de; } .control-sidebar-light .nav-tabs.control-sidebar-tabs { border-bottom: #d2d6de; } .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a { background: #e8ecf4; color: #444444; } .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a:hover, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a:focus { border-left-color: #d2d6de; border-bottom-color: #d2d6de; } .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a:hover, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a:focus, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li > a:active { background: #eff1f7; } .control-sidebar-light .nav-tabs.control-sidebar-tabs > li.active > a, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li.active > a:hover, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li.active > a:focus, .control-sidebar-light .nav-tabs.control-sidebar-tabs > li.active > a:active { background: #f9fafc; color: #111; } .control-sidebar-light .control-sidebar-heading, .control-sidebar-light .control-sidebar-subheading { color: #111; } .control-sidebar-light .control-sidebar-menu { margin-left: -14px; } .control-sidebar-light .control-sidebar-menu > li > a:hover { background: #f4f4f5; } .control-sidebar-light .control-sidebar-menu > li > a .menu-info > p { color: #5e5e5e; } /* * Component: Dropdown menus * 
------------------------- */ /*Dropdowns in general*/ .dropdown-menu { box-shadow: none; border-color: #eee; } .dropdown-menu > li > a { color: #777; } .dropdown-menu > li > a > .glyphicon, .dropdown-menu > li > a > .fa, .dropdown-menu > li > a > .ion { margin-right: 10px; } .dropdown-menu > li > a:hover { background-color: #e1e3e9; color: #333; } .dropdown-menu > .divider { background-color: #eee; } .navbar-nav > .notifications-menu > .dropdown-menu, .navbar-nav > .messages-menu > .dropdown-menu, .navbar-nav > .tasks-menu > .dropdown-menu { width: 280px; padding: 0 0 0 0; margin: 0; top: 100%; } .navbar-nav > .notifications-menu > .dropdown-menu > li, .navbar-nav > .messages-menu > .dropdown-menu > li, .navbar-nav > .tasks-menu > .dropdown-menu > li { position: relative; } .navbar-nav > .notifications-menu > .dropdown-menu > li.header, .navbar-nav > .messages-menu > .dropdown-menu > li.header, .navbar-nav > .tasks-menu > .dropdown-menu > li.header { border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom-right-radius: 0; border-bottom-left-radius: 0; background-color: #ffffff; padding: 7px 10px; border-bottom: 1px solid #f4f4f4; color: #444444; font-size: 14px; } .navbar-nav > .notifications-menu > .dropdown-menu > li.footer > a, .navbar-nav > .messages-menu > .dropdown-menu > li.footer > a, .navbar-nav > .tasks-menu > .dropdown-menu > li.footer > a { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-right-radius: 4px; border-bottom-left-radius: 4px; font-size: 12px; background-color: #fff; padding: 7px 10px; border-bottom: 1px solid #eeeeee; color: #444 !important; text-align: center; } @media (max-width: 991px) { .navbar-nav > .notifications-menu > .dropdown-menu > li.footer > a, .navbar-nav > .messages-menu > .dropdown-menu > li.footer > a, .navbar-nav > .tasks-menu > .dropdown-menu > li.footer > a { background: #fff !important; color: #444 !important; } } .navbar-nav > .notifications-menu > .dropdown-menu > li.footer > 
a:hover, .navbar-nav > .messages-menu > .dropdown-menu > li.footer > a:hover, .navbar-nav > .tasks-menu > .dropdown-menu > li.footer > a:hover { text-decoration: none; font-weight: normal; } .navbar-nav > .notifications-menu > .dropdown-menu > li .menu, .navbar-nav > .messages-menu > .dropdown-menu > li .menu, .navbar-nav > .tasks-menu > .dropdown-menu > li .menu { max-height: 200px; margin: 0; padding: 0; list-style: none; overflow-x: hidden; } .navbar-nav > .notifications-menu > .dropdown-menu > li .menu > li > a, .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a, .navbar-nav > .tasks-menu > .dropdown-menu > li .menu > li > a { display: block; white-space: nowrap; /* Prevent text from breaking */ border-bottom: 1px solid #f4f4f4; } .navbar-nav > .notifications-menu > .dropdown-menu > li .menu > li > a:hover, .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a:hover, .navbar-nav > .tasks-menu > .dropdown-menu > li .menu > li > a:hover { background: #f4f4f4; text-decoration: none; } .navbar-nav > .notifications-menu > .dropdown-menu > li .menu > li > a { color: #444444; overflow: hidden; text-overflow: ellipsis; padding: 10px; } .navbar-nav > .notifications-menu > .dropdown-menu > li .menu > li > a > .glyphicon, .navbar-nav > .notifications-menu > .dropdown-menu > li .menu > li > a > .fa, .navbar-nav > .notifications-menu > .dropdown-menu > li .menu > li > a > .ion { width: 20px; } .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a { margin: 0; padding: 10px 10px; } .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a > div > img { margin: auto 10px auto auto; width: 40px; height: 40px; } .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a > h4 { padding: 0; margin: 0 0 0 45px; color: #444444; font-size: 15px; position: relative; } .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a > h4 > small { color: #999999; font-size: 10px; position: absolute; top: 0; right: 0; } .navbar-nav 
> .messages-menu > .dropdown-menu > li .menu > li > a > p { margin: 0 0 0 45px; font-size: 12px; color: #888888; } .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a:before, .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a:after { content: " "; display: table; } .navbar-nav > .messages-menu > .dropdown-menu > li .menu > li > a:after { clear: both; } .navbar-nav > .tasks-menu > .dropdown-menu > li .menu > li > a { padding: 10px; } .navbar-nav > .tasks-menu > .dropdown-menu > li .menu > li > a > h3 { font-size: 14px; padding: 0; margin: 0 0 10px 0; color: #666666; } .navbar-nav > .tasks-menu > .dropdown-menu > li .menu > li > a > .progress { padding: 0; margin: 0; } .navbar-nav > .user-menu > .dropdown-menu { border-top-right-radius: 0; border-top-left-radius: 0; padding: 1px 0 0 0; border-top-width: 0; width: 280px; } .navbar-nav > .user-menu > .dropdown-menu, .navbar-nav > .user-menu > .dropdown-menu > .user-body { border-bottom-right-radius: 4px; border-bottom-left-radius: 4px; } .navbar-nav > .user-menu > .dropdown-menu > li.user-header { height: 175px; padding: 10px; text-align: center; } .navbar-nav > .user-menu > .dropdown-menu > li.user-header > img { z-index: 5; height: 90px; width: 90px; border: 3px solid; border-color: transparent; border-color: rgba(255, 255, 255, 0.2); } .navbar-nav > .user-menu > .dropdown-menu > li.user-header > p { z-index: 5; color: #fff; color: rgba(255, 255, 255, 0.8); font-size: 17px; margin-top: 10px; } .navbar-nav > .user-menu > .dropdown-menu > li.user-header > p > small { display: block; font-size: 12px; } .navbar-nav > .user-menu > .dropdown-menu > .user-body { padding: 15px; border-bottom: 1px solid #f4f4f4; border-top: 1px solid #dddddd; } .navbar-nav > .user-menu > .dropdown-menu > .user-body:before, .navbar-nav > .user-menu > .dropdown-menu > .user-body:after { content: " "; display: table; } .navbar-nav > .user-menu > .dropdown-menu > .user-body:after { clear: both; } .navbar-nav > 
.user-menu > .dropdown-menu > .user-body a { color: #444 !important; } @media (max-width: 991px) { .navbar-nav > .user-menu > .dropdown-menu > .user-body a { background: #fff !important; color: #444 !important; } } .navbar-nav > .user-menu > .dropdown-menu > .user-footer { background-color: #f9f9f9; padding: 10px; } .navbar-nav > .user-menu > .dropdown-menu > .user-footer:before, .navbar-nav > .user-menu > .dropdown-menu > .user-footer:after { content: " "; display: table; } .navbar-nav > .user-menu > .dropdown-menu > .user-footer:after { clear: both; } .navbar-nav > .user-menu > .dropdown-menu > .user-footer .btn-default { color: #666666; } @media (max-width: 991px) { .navbar-nav > .user-menu > .dropdown-menu > .user-footer .btn-default:hover { background-color: #f9f9f9; } } .navbar-nav > .user-menu .user-image { float: left; width: 25px; height: 25px; border-radius: 50%; margin-right: 10px; margin-top: -2px; } @media (max-width: 767px) { .navbar-nav > .user-menu .user-image { float: none; margin-right: 0; margin-top: -8px; line-height: 10px; } } /* Add fade animation to dropdown menus by appending the class .animated-dropdown-menu to the .dropdown-menu ul (or ol)*/ .open:not(.dropup) > .animated-dropdown-menu { backface-visibility: visible !important; -webkit-animation: flipInX 0.7s both; -o-animation: flipInX 0.7s both; animation: flipInX 0.7s both; } @keyframes flipInX { 0% { transform: perspective(400px) rotate3d(1, 0, 0, 90deg); transition-timing-function: ease-in; opacity: 0; } 40% { transform: perspective(400px) rotate3d(1, 0, 0, -20deg); transition-timing-function: ease-in; } 60% { transform: perspective(400px) rotate3d(1, 0, 0, 10deg); opacity: 1; } 80% { transform: perspective(400px) rotate3d(1, 0, 0, -5deg); } 100% { transform: perspective(400px); } } @-webkit-keyframes flipInX { 0% { -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 90deg); -webkit-transition-timing-function: ease-in; opacity: 0; } 40% { -webkit-transform: perspective(400px) 
rotate3d(1, 0, 0, -20deg); -webkit-transition-timing-function: ease-in; } 60% { -webkit-transform: perspective(400px) rotate3d(1, 0, 0, 10deg); opacity: 1; } 80% { -webkit-transform: perspective(400px) rotate3d(1, 0, 0, -5deg); } 100% { -webkit-transform: perspective(400px); } } /* Fix dropdown menu in navbars */ .navbar-custom-menu > .navbar-nav > li { position: relative; } .navbar-custom-menu > .navbar-nav > li > .dropdown-menu { position: absolute; right: 0; left: auto; } @media (max-width: 991px) { .navbar-custom-menu > .navbar-nav { float: right; } .navbar-custom-menu > .navbar-nav > li { position: static; } .navbar-custom-menu > .navbar-nav > li > .dropdown-menu { position: absolute; right: 5%; left: auto; border: 1px solid #ddd; background: #fff; } } /* * Component: Form * --------------- */ .form-control { border-radius: 0; box-shadow: none; border-color: #d2d6de; } .form-control:focus { border-color: #3c8dbc; box-shadow: none; } .form-control::-moz-placeholder, .form-control:-ms-input-placeholder, .form-control::-webkit-input-placeholder { color: #bbb; opacity: 1; } .form-control:not(select) { -webkit-appearance: none; -moz-appearance: none; appearance: none; } .form-group.has-success label { color: #00a65a; } .form-group.has-success .form-control { border-color: #00a65a; box-shadow: none; } .form-group.has-success .help-block { color: #00a65a; } .form-group.has-warning label { color: #f39c12; } .form-group.has-warning .form-control { border-color: #f39c12; box-shadow: none; } .form-group.has-warning .help-block { color: #f39c12; } .form-group.has-error label { color: #dd4b39; } .form-group.has-error .form-control { border-color: #dd4b39; box-shadow: none; } .form-group.has-error .help-block { color: #dd4b39; } /* Input group */ .input-group .input-group-addon { border-radius: 0; border-color: #d2d6de; background-color: #fff; } /* button groups */ .btn-group-vertical .btn.btn-flat:first-of-type, .btn-group-vertical .btn.btn-flat:last-of-type { 
border-radius: 0; } .icheck > label { padding-left: 0; } /* support Font Awesome icons in form-control */ .form-control-feedback.fa { line-height: 34px; } .input-lg + .form-control-feedback.fa, .input-group-lg + .form-control-feedback.fa, .form-group-lg .form-control + .form-control-feedback.fa { line-height: 46px; } .input-sm + .form-control-feedback.fa, .input-group-sm + .form-control-feedback.fa, .form-group-sm .form-control + .form-control-feedback.fa { line-height: 30px; } /* * Component: Progress Bar * ----------------------- */ .progress, .progress > .progress-bar { -webkit-box-shadow: none; box-shadow: none; } .progress, .progress > .progress-bar, .progress .progress-bar, .progress > .progress-bar .progress-bar { border-radius: 1px; } /* size variation */ .progress.sm, .progress-sm { height: 10px; } .progress.sm, .progress-sm, .progress.sm .progress-bar, .progress-sm .progress-bar { border-radius: 1px; } .progress.xs, .progress-xs { height: 7px; } .progress.xs, .progress-xs, .progress.xs .progress-bar, .progress-xs .progress-bar { border-radius: 1px; } .progress.xxs, .progress-xxs { height: 3px; } .progress.xxs, .progress-xxs, .progress.xxs .progress-bar, .progress-xxs .progress-bar { border-radius: 1px; } /* Vertical bars */ .progress.vertical { position: relative; width: 30px; height: 200px; display: inline-block; margin-right: 10px; } .progress.vertical > .progress-bar { width: 100%; position: absolute; bottom: 0; } .progress.vertical.sm, .progress.vertical.progress-sm { width: 20px; } .progress.vertical.xs, .progress.vertical.progress-xs { width: 10px; } .progress.vertical.xxs, .progress.vertical.progress-xxs { width: 3px; } .progress-group .progress-text { font-weight: 600; } .progress-group .progress-number { float: right; } /* Remove margins from progress bars when put in a table */ .table tr > td .progress { margin: 0; } .progress-bar-light-blue, .progress-bar-primary { background-color: #3c8dbc; } .progress-striped .progress-bar-light-blue, 
.progress-striped .progress-bar-primary { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); } .progress-bar-green, .progress-bar-success { background-color: #00a65a; } .progress-striped .progress-bar-green, .progress-striped .progress-bar-success { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); } .progress-bar-aqua, .progress-bar-info { background-color: #00c0ef; } .progress-striped .progress-bar-aqua, .progress-striped .progress-bar-info { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: 
linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); } .progress-bar-yellow, .progress-bar-warning { background-color: #f39c12; } .progress-striped .progress-bar-yellow, .progress-striped .progress-bar-warning { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); } .progress-bar-red, .progress-bar-danger { background-color: #dd4b39; } .progress-striped .progress-bar-red, .progress-striped .progress-bar-danger { background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); } /* * Component: Small Box * -------------------- */ .small-box { border-radius: 2px; position: relative; display: block; margin-bottom: 20px; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); } .small-box > .inner { padding: 10px; } .small-box > .small-box-footer { position: relative; text-align: 
center; padding: 3px 0; color: #fff; color: rgba(255, 255, 255, 0.8); display: block; z-index: 10; background: rgba(0, 0, 0, 0.1); text-decoration: none; } .small-box > .small-box-footer:hover { color: #fff; background: rgba(0, 0, 0, 0.15); } .small-box h3 { font-size: 38px; font-weight: bold; margin: 0 0 10px 0; white-space: nowrap; padding: 0; } .small-box p { font-size: 15px; } .small-box p > small { display: block; color: #f9f9f9; font-size: 13px; margin-top: 5px; } .small-box h3, .small-box p { z-index: 5; } .small-box .icon { -webkit-transition: all 0.3s linear; -o-transition: all 0.3s linear; transition: all 0.3s linear; position: absolute; top: -10px; right: 10px; z-index: 0; font-size: 90px; color: rgba(0, 0, 0, 0.15); } .small-box:hover { text-decoration: none; color: #f9f9f9; } .small-box:hover .icon { font-size: 95px; } @media (max-width: 767px) { .small-box { text-align: center; } .small-box .icon { display: none; } .small-box p { font-size: 12px; } } /* * Component: Box * -------------- */ .box { position: relative; border-radius: 3px; background: #ffffff; border-top: 3px solid #d2d6de; margin-bottom: 20px; width: 100%; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); } .box.box-primary { border-top-color: #3c8dbc; } .box.box-info { border-top-color: #00c0ef; } .box.box-danger { border-top-color: #dd4b39; } .box.box-warning { border-top-color: #f39c12; } .box.box-success { border-top-color: #00a65a; } .box.box-default { border-top-color: #d2d6de; } .box.collapsed-box .box-body, .box.collapsed-box .box-footer { display: none; } .box .nav-stacked > li { border-bottom: 1px solid #f4f4f4; margin: 0; } .box .nav-stacked > li:last-of-type { border-bottom: none; } .box.height-control .box-body { max-height: 300px; overflow: auto; } .box .border-right { border-right: 1px solid #f4f4f4; } .box .border-left { border-left: 1px solid #f4f4f4; } .box.box-solid { border-top: 0; } .box.box-solid > .box-header .btn.btn-default { background: transparent; } .box.box-solid > 
.box-header .btn:hover, .box.box-solid > .box-header a:hover { background: rgba(0, 0, 0, 0.1); } .box.box-solid.box-default { border: 1px solid #d2d6de; } .box.box-solid.box-default > .box-header { color: #444444; background: #d2d6de; background-color: #d2d6de; } .box.box-solid.box-default > .box-header a, .box.box-solid.box-default > .box-header .btn { color: #444444; } .box.box-solid.box-primary { border: 1px solid #3c8dbc; } .box.box-solid.box-primary > .box-header { color: #ffffff; background: #3c8dbc; background-color: #3c8dbc; } .box.box-solid.box-primary > .box-header a, .box.box-solid.box-primary > .box-header .btn { color: #ffffff; } .box.box-solid.box-info { border: 1px solid #00c0ef; } .box.box-solid.box-info > .box-header { color: #ffffff; background: #00c0ef; background-color: #00c0ef; } .box.box-solid.box-info > .box-header a, .box.box-solid.box-info > .box-header .btn { color: #ffffff; } .box.box-solid.box-danger { border: 1px solid #dd4b39; } .box.box-solid.box-danger > .box-header { color: #ffffff; background: #dd4b39; background-color: #dd4b39; } .box.box-solid.box-danger > .box-header a, .box.box-solid.box-danger > .box-header .btn { color: #ffffff; } .box.box-solid.box-warning { border: 1px solid #f39c12; } .box.box-solid.box-warning > .box-header { color: #ffffff; background: #f39c12; background-color: #f39c12; } .box.box-solid.box-warning > .box-header a, .box.box-solid.box-warning > .box-header .btn { color: #ffffff; } .box.box-solid.box-success { border: 1px solid #00a65a; } .box.box-solid.box-success > .box-header { color: #ffffff; background: #00a65a; background-color: #00a65a; } .box.box-solid.box-success > .box-header a, .box.box-solid.box-success > .box-header .btn { color: #ffffff; } .box.box-solid > .box-header > .box-tools .btn { border: 0; box-shadow: none; } .box.box-solid[class*='bg'] > .box-header { color: #fff; } .box .box-group > .box { margin-bottom: 5px; } .box .knob-label { text-align: center; color: #333; font-weight: 100; 
font-size: 12px; margin-bottom: 0.3em; } .box > .overlay, .overlay-wrapper > .overlay, .box > .loading-img, .overlay-wrapper > .loading-img { position: absolute; top: 0; left: 0; width: 100%; height: 100%; } .box .overlay, .overlay-wrapper .overlay { z-index: 50; background: rgba(255, 255, 255, 0.7); border-radius: 3px; } .box .overlay > .fa, .overlay-wrapper .overlay > .fa { position: absolute; top: 50%; left: 50%; margin-left: -15px; margin-top: -15px; color: #000; font-size: 30px; } .box .overlay.dark, .overlay-wrapper .overlay.dark { background: rgba(0, 0, 0, 0.5); } .box-header:before, .box-body:before, .box-footer:before, .box-header:after, .box-body:after, .box-footer:after { content: " "; display: table; } .box-header:after, .box-body:after, .box-footer:after { clear: both; } .box-header { color: #444; display: block; padding: 10px; position: relative; } .box-header.with-border { border-bottom: 1px solid #f4f4f4; } .collapsed-box .box-header.with-border { border-bottom: none; } .box-header > .fa, .box-header > .glyphicon, .box-header > .ion, .box-header .box-title { display: inline-block; font-size: 18px; margin: 0; line-height: 1; } .box-header > .fa, .box-header > .glyphicon, .box-header > .ion { margin-right: 5px; } .box-header > .box-tools { position: absolute; right: 10px; top: 5px; } .box-header > .box-tools [data-toggle="tooltip"] { position: relative; } .box-header > .box-tools.pull-right .dropdown-menu { right: 0; left: auto; } .btn-box-tool { padding: 5px; font-size: 12px; background: transparent; color: #97a0b3; } .open .btn-box-tool, .btn-box-tool:hover { color: #606c84; } .btn-box-tool.btn:active { box-shadow: none; } .box-body { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; padding: 10px; } .no-header .box-body { border-top-right-radius: 3px; border-top-left-radius: 3px; } .box-body > .table { margin-bottom: 0; } .box-body .fc { margin-top: 5px; } .box-body 
.full-width-chart { margin: -19px; } .box-body.no-padding .full-width-chart { margin: -9px; } .box-body .box-pane { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 3px; } .box-body .box-pane-right { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-right-radius: 3px; border-bottom-left-radius: 0; } .box-footer { border-top-left-radius: 0; border-top-right-radius: 0; border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; border-top: 1px solid #f4f4f4; padding: 10px; background-color: #ffffff; } .chart-legend { margin: 10px 0; } @media (max-width: 991px) { .chart-legend > li { float: left; margin-right: 10px; } } .box-comments { background: #f7f7f7; } .box-comments .box-comment { padding: 8px 0; border-bottom: 1px solid #eee; } .box-comments .box-comment:before, .box-comments .box-comment:after { content: " "; display: table; } .box-comments .box-comment:after { clear: both; } .box-comments .box-comment:last-of-type { border-bottom: 0; } .box-comments .box-comment:first-of-type { padding-top: 0; } .box-comments .box-comment img { float: left; } .box-comments .comment-text { margin-left: 40px; color: #555; } .box-comments .username { color: #444; display: block; font-weight: 600; } .box-comments .text-muted { font-weight: 400; font-size: 12px; } /* Widget: TODO LIST */ .todo-list { margin: 0; padding: 0; list-style: none; overflow: auto; } .todo-list > li { border-radius: 2px; padding: 10px; background: #f4f4f4; margin-bottom: 2px; border-left: 2px solid #e6e7e8; color: #444; } .todo-list > li:last-of-type { margin-bottom: 0; } .todo-list > li > input[type='checkbox'] { margin: 0 10px 0 5px; } .todo-list > li .text { display: inline-block; margin-left: 5px; font-weight: 600; } .todo-list > li .label { margin-left: 10px; font-size: 9px; } .todo-list > li .tools { display: none; float: right; color: #dd4b39; } .todo-list > li .tools > .fa, .todo-list > li .tools > .glyphicon, 
/* Todo list (continued): per-item tool icons and state styling */
.todo-list > li .tools > .ion { margin-right: 5px; cursor: pointer; }
.todo-list > li:hover .tools { display: inline-block; }
.todo-list > li.done { color: #999; }
.todo-list > li.done .text { text-decoration: line-through; font-weight: 500; }
.todo-list > li.done .label { background: #d2d6de !important; }
/* Colored left borders mark item priority */
.todo-list .danger { border-left-color: #dd4b39; }
.todo-list .warning { border-left-color: #f39c12; }
.todo-list .info { border-left-color: #00c0ef; }
.todo-list .success { border-left-color: #00a65a; }
.todo-list .primary { border-left-color: #3c8dbc; }
.todo-list .handle { display: inline-block; cursor: move; margin: 0 5px; }

/* Chat widget (DEPRECATED: scheduled for removal in the next major release — use Direct Chat instead) */
.chat { padding: 5px 20px 5px 10px; }
.chat .item { margin-bottom: 10px; }
/* Clearfix for chat items */
.chat .item:before,
.chat .item:after { content: " "; display: table; }
.chat .item:after { clear: both; }
.chat .item > img { width: 40px; height: 40px; border: 2px solid transparent; border-radius: 50%; }
.chat .item > .online { border: 2px solid #00a65a; }
.chat .item > .offline { border: 2px solid #dd4b39; }
.chat .item > .message { margin-left: 55px; margin-top: -40px; }
.chat .item > .message > .name { display: block; font-weight: 600; }
.chat .item > .attachment { border-radius: 3px; background: #f4f4f4; margin-left: 65px; margin-right: 15px; padding: 10px; }
.chat .item > .attachment > h4 { margin: 0 0 5px 0; font-weight: 600; font-size: 14px; }
.chat .item > .attachment > p,
.chat .item > .attachment > .filename { font-weight: 600; font-size: 13px; font-style: italic; margin: 0; }
.chat .item > .attachment:before,
.chat .item > .attachment:after { content: " "; display: table; }
.chat .item > .attachment:after { clear: both; }

.box-input { max-width: 200px; }
.modal .panel-body { color: #444; }

/*
 * Component: Info Box
 * -------------------
 */
.info-box { display: block; min-height: 90px; background: #fff; width: 100%; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); border-radius: 2px; margin-bottom: 15px; }
.info-box small { font-size: 14px; }
.info-box .progress { background: rgba(0, 0, 0, 0.2); margin: 5px -10px 5px -10px; height: 2px; }
.info-box .progress,
.info-box .progress .progress-bar { border-radius: 0; }
.info-box .progress .progress-bar { background: #fff; }
.info-box-icon { border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; display: block; float: left; height: 90px; width: 90px; text-align: center; font-size: 45px; line-height: 90px; background: rgba(0, 0, 0, 0.2); }
.info-box-icon > img { max-width: 100%; }
.info-box-content { padding: 5px 10px; margin-left: 90px; }
.info-box-number { display: block; font-weight: bold; font-size: 18px; }
.progress-description,
.info-box-text { display: block; font-size: 14px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; }
.info-box-text { text-transform: uppercase; }
.info-box-more { display: block; }
.progress-description { margin: 0; }

/*
 * Component: Timeline
 * -------------------
 */
.timeline { position: relative; margin: 0 0 30px 0; padding: 0; list-style: none; }
/* Vertical rail running down the left of the timeline */
.timeline:before { content: ''; position: absolute; top: 0; bottom: 0; width: 4px; background: #ddd; left: 31px; margin: 0; border-radius: 2px; }
.timeline > li { position: relative; margin-right: 10px; margin-bottom: 15px; }
.timeline > li:before,
.timeline > li:after { content: " "; display: table; }
.timeline > li:after { clear: both; }
.timeline > li > .timeline-item { -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); border-radius: 3px; margin-top: 0; background: #fff; color: #444; margin-left: 60px; margin-right: 15px; padding: 0; position: relative; }
.timeline > li > .timeline-item > .time { color: #999; float: right; padding: 10px; font-size: 12px; }
.timeline > li > .timeline-item > .timeline-header { margin: 0; color: #555; border-bottom: 1px solid #f4f4f4; padding: 10px; font-size: 16px; line-height: 1.1; }
.timeline > li > .timeline-item > .timeline-header > a { font-weight: 600; }
.timeline > li > .timeline-item > .timeline-body,
.timeline > li > .timeline-item > .timeline-footer { padding: 10px; }
/* Circular icon pinned on the timeline rail */
.timeline > li > .fa,
.timeline > li > .glyphicon,
.timeline > li > .ion { width: 30px; height: 30px; font-size: 15px; line-height: 30px; position: absolute; color: #666; background: #d2d6de; border-radius: 50%; text-align: center; left: 18px; top: 0; }
.timeline > .time-label > span { font-weight: 600; padding: 5px; display: inline-block; background-color: #fff; border-radius: 4px; }
.timeline-inverse > li > .timeline-item { background: #f0f0f0; border: 1px solid #ddd; -webkit-box-shadow: none; box-shadow: none; }
.timeline-inverse > li > .timeline-item > .timeline-header { border-bottom-color: #ddd; }

/*
 * Component: Button
 * -----------------
 */
.btn { border-radius: 3px; -webkit-box-shadow: none; box-shadow: none; border: 1px solid transparent; }
.btn.uppercase { text-transform: uppercase; }
.btn.btn-flat { border-radius: 0; -webkit-box-shadow: none; -moz-box-shadow: none; box-shadow: none; border-width: 1px; }
.btn:active { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -moz-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); }
.btn:focus { outline: none; }
/* File-input button: the real input element is stretched invisibly over the button face */
.btn.btn-file { position: relative; overflow: hidden; }
.btn.btn-file > input[type='file'] { position: absolute; top: 0; right: 0; min-width: 100%; min-height: 100%; font-size: 100px; text-align: right; opacity: 0; filter: alpha(opacity=0); outline: none; background: white; cursor: inherit; display: block; }
/* Contextual button skins */
.btn-default { background-color: #f4f4f4; color: #444; border-color: #ddd; }
.btn-default:hover,
.btn-default:active,
.btn-default.hover { background-color: #e7e7e7; }
.btn-primary { background-color: #3c8dbc; border-color: #367fa9; }
.btn-primary:hover,
.btn-primary:active,
.btn-primary.hover { background-color: #367fa9; }
.btn-success { background-color: #00a65a; border-color: #008d4c; }
.btn-success:hover,
.btn-success:active,
.btn-success.hover { background-color: #008d4c; }
.btn-info { background-color: #00c0ef; border-color: #00acd6; }
.btn-info:hover,
.btn-info:active,
.btn-info.hover { background-color: #00acd6; }
.btn-danger { background-color: #dd4b39; border-color: #d73925; }
.btn-danger:hover,
.btn-danger:active,
.btn-danger.hover { background-color: #d73925; }
.btn-warning { background-color: #f39c12; border-color: #e08e0b; }
.btn-warning:hover,
.btn-warning:active,
.btn-warning.hover { background-color: #e08e0b; }
.btn-outline { border: 1px solid #fff; background: transparent; color: #fff; }
.btn-outline:hover,
.btn-outline:focus,
.btn-outline:active { color: rgba(255, 255, 255, 0.7); border-color: rgba(255, 255, 255, 0.7); }
.btn-link { -webkit-box-shadow: none; box-shadow: none; }
.btn[class*='bg-']:hover { -webkit-box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.2); box-shadow: inset 0 0 100px rgba(0, 0, 0, 0.2); }
/* Large "app" buttons with an icon above the label */
.btn-app { border-radius: 3px; position: relative; padding: 15px 5px; margin: 0 0 10px 10px; min-width: 80px; height: 60px; text-align: center; color: #666; border: 1px solid #ddd; background-color: #f4f4f4; font-size: 12px; }
.btn-app > .fa,
.btn-app > .glyphicon,
.btn-app > .ion { font-size: 20px; display: block; }
.btn-app:hover { background: #f4f4f4; color: #444; border-color: #aaa; }
.btn-app:active,
.btn-app:focus { -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -moz-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); }
.btn-app > .badge { position: absolute; top: -3px; right: -10px; font-size: 10px; font-weight: 400; }

/*
 * Component: Callout
 * ------------------
 */
.callout { border-radius: 3px; margin: 0 0 20px 0; padding: 15px 30px 15px 15px; border-left: 5px solid #eee; }
.callout a { color: #fff; text-decoration: underline; }
.callout a:hover { color: #eee; }
/* Callout content */
.callout h4 { margin-top: 0; font-weight: 600; }
.callout p:last-child { margin-bottom: 0; }
.callout code,
.callout .highlight { background-color: #fff; }
/* Contextual callout border colors */
.callout.callout-danger { border-color: #c23321; }
.callout.callout-warning { border-color: #c87f0a; }
.callout.callout-info { border-color: #0097bc; }
.callout.callout-success { border-color: #00733e; }

/*
 * Component: alert
 * ----------------
 */
.alert { border-radius: 3px; }
.alert h4 { font-weight: 600; }
.alert .icon { margin-right: 10px; }
.alert .close { color: #000; opacity: 0.2; filter: alpha(opacity=20); }
.alert .close:hover { opacity: 0.5; filter: alpha(opacity=50); }
.alert a { color: #fff; text-decoration: underline; }
.alert-success { border-color: #008d4c; }
.alert-danger,
.alert-error { border-color: #d73925; }
.alert-warning { border-color: #e08e0b; }
.alert-info { border-color: #00acd6; }

/*
 * Component: Nav
 * --------------
 */
.nav > li > a:hover,
.nav > li > a:active,
.nav > li > a:focus { color: #444; background: #f7f7f7; }

/* Nav pills */
.nav-pills > li > a { border-radius: 0; border-top: 3px solid transparent; color: #444; }
.nav-pills > li > a > .fa,
.nav-pills > li > a > .glyphicon,
.nav-pills > li > a > .ion { margin-right: 5px; }
.nav-pills > li.active > a,
.nav-pills > li.active > a:hover,
.nav-pills > li.active > a:focus { border-top-color: #3c8dbc; }
.nav-pills > li.active > a { font-weight: 600; }

/* Nav stacked */
.nav-stacked > li > a { border-radius: 0; border-top: 0; border-left: 3px solid transparent; color: #444; }
.nav-stacked > li.active > a,
.nav-stacked > li.active > a:hover { background: transparent; color: #444; border-top: 0; border-left-color: #3c8dbc; }
.nav-stacked > li.header { border-bottom: 1px solid #ddd; color: #777; margin-bottom: 10px; padding: 5px 10px; text-transform: uppercase; }

/* Nav tabs: boxed custom tabs with a colored top accent */
.nav-tabs-custom { margin-bottom: 20px; background: #fff; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); border-radius: 3px; }
.nav-tabs-custom > .nav-tabs { margin: 0; border-bottom-color: #f4f4f4; border-top-right-radius: 3px; border-top-left-radius: 3px; }
.nav-tabs-custom > .nav-tabs > li { border-top: 3px solid transparent; margin-bottom: -2px; margin-right: 5px; }
.nav-tabs-custom > .nav-tabs > li > a { color: #444; border-radius: 0; }
.nav-tabs-custom > .nav-tabs > li > a.text-muted { color: #999; }
.nav-tabs-custom > .nav-tabs > li > a,
.nav-tabs-custom > .nav-tabs > li > a:hover { background: transparent; margin: 0; }
.nav-tabs-custom > .nav-tabs > li > a:hover { color: #999; }
.nav-tabs-custom > .nav-tabs > li:not(.active) > a:hover,
.nav-tabs-custom > .nav-tabs > li:not(.active) > a:focus,
.nav-tabs-custom > .nav-tabs > li:not(.active) > a:active { border-color: transparent; }
.nav-tabs-custom > .nav-tabs > li.active { border-top-color: #3c8dbc; }
.nav-tabs-custom > .nav-tabs > li.active > a,
.nav-tabs-custom > .nav-tabs > li.active:hover > a { background-color: #fff; color: #444; }
.nav-tabs-custom > .nav-tabs > li.active > a { border-top-color: transparent; border-left-color: #f4f4f4; border-right-color: #f4f4f4; }
.nav-tabs-custom > .nav-tabs > li:first-of-type { margin-left: 0; }
.nav-tabs-custom > .nav-tabs > li:first-of-type.active > a { border-left-color: transparent; }
/* Right-aligned tab variant */
.nav-tabs-custom > .nav-tabs.pull-right { float: none !important; }
.nav-tabs-custom > .nav-tabs.pull-right > li { float: right; }
.nav-tabs-custom > .nav-tabs.pull-right > li:first-of-type { margin-right: 0; }
.nav-tabs-custom > .nav-tabs.pull-right > li:first-of-type > a { border-left-width: 1px; }
.nav-tabs-custom > .nav-tabs.pull-right > li:first-of-type.active > a { border-left-color: #f4f4f4; border-right-color: transparent; }
.nav-tabs-custom > .nav-tabs > li.header { line-height: 35px; padding: 0 10px; font-size: 20px; color: #444; }
.nav-tabs-custom > .nav-tabs > li.header > .fa,
.nav-tabs-custom > .nav-tabs > li.header > .glyphicon,
.nav-tabs-custom > .nav-tabs > li.header > .ion { margin-right: 5px; }
.nav-tabs-custom > .tab-content { background: #fff; padding: 10px; border-bottom-right-radius: 3px; border-bottom-left-radius: 3px; }
.nav-tabs-custom .dropdown.open > a:active,
.nav-tabs-custom .dropdown.open > a:focus { background: transparent; color: #999; }
/* Tab accent color variants */
.nav-tabs-custom.tab-primary > .nav-tabs > li.active { border-top-color: #3c8dbc; }
.nav-tabs-custom.tab-info > .nav-tabs > li.active { border-top-color: #00c0ef; }
.nav-tabs-custom.tab-danger > .nav-tabs > li.active { border-top-color: #dd4b39; }
.nav-tabs-custom.tab-warning > .nav-tabs > li.active { border-top-color: #f39c12; }
.nav-tabs-custom.tab-success > .nav-tabs > li.active { border-top-color: #00a65a; }
.nav-tabs-custom.tab-default > .nav-tabs > li.active { border-top-color: #d2d6de; }

/* Pagination */
.pagination > li > a { background: #fafafa; color: #666; }
.pagination.pagination-flat > li > a { border-radius: 0 !important; }

/*
 * Component: Products List
 * ------------------------
 */
.products-list { list-style: none; margin: 0; padding: 0; }
.products-list > .item { border-radius: 3px; -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); padding: 10px 0; background: #fff; }
.products-list > .item:before,
.products-list > .item:after { content: " "; display: table; }
.products-list > .item:after { clear: both; }
.products-list .product-img { float: left; }
.products-list .product-img img { width: 50px; height: 50px; }
.products-list .product-info { margin-left: 60px; }
.products-list .product-title { font-weight: 600; }
.products-list .product-description { display: block; color: #999; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; }
.product-list-in-box > .item { -webkit-box-shadow: none; box-shadow: none; border-radius: 0; border-bottom: 1px solid #f4f4f4; }
.product-list-in-box > .item:last-of-type { border-bottom-width: 0; }

/*
 * Component: Table
 * ----------------
 */
.table > thead > tr > th,
.table > tbody > tr > th,
.table > tfoot > tr > th,
.table > thead > tr > td,
.table > tbody > tr > td,
.table > tfoot > tr > td { border-top: 1px solid #f4f4f4; }
.table > thead > tr > th { border-bottom: 2px solid #f4f4f4; }
.table tr td .progress { margin-top: 5px; }
.table-bordered { border: 1px solid #f4f4f4; }
.table-bordered > thead > tr > th,
.table-bordered > tbody > tr > th,
.table-bordered > tfoot > tr > th,
.table-bordered > thead > tr > td,
.table-bordered > tbody > tr > td,
.table-bordered > tfoot > tr > td { border: 1px solid #f4f4f4; }
.table-bordered > thead > tr > th,
.table-bordered > thead > tr > td { border-bottom-width: 2px; }
.table.no-border,
.table.no-border td,
.table.no-border th { border: 0; }
/* .text-center in tables */
table.text-center,
table.text-center td,
table.text-center th { text-align: center; }
.table.align th { text-align: left; }
.table.align td { text-align: right; }

/*
 * Component: Label
 * ----------------
 */
.label-default { background-color: #d2d6de; color: #444; }

/*
 * Component: Direct Chat
 * ----------------------
 */
.direct-chat .box-body { border-bottom-right-radius: 0; border-bottom-left-radius: 0; position: relative; overflow-x: hidden; padding: 0; }
.direct-chat.chat-pane-open .direct-chat-contacts { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); -o-transform: translate(0, 0); transform: translate(0, 0); }
.direct-chat-messages { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); -o-transform: translate(0, 0); transform: translate(0, 0); padding: 10px; height: 250px; overflow: auto; }
.direct-chat-msg,
.direct-chat-text { display: block; }
.direct-chat-msg { margin-bottom: 10px; }
.direct-chat-msg:before,
.direct-chat-msg:after { content: " "; display: table; }
.direct-chat-msg:after { clear: both; }
/* Slide animation between the messages pane and the contacts pane */
.direct-chat-messages,
.direct-chat-contacts { -webkit-transition: -webkit-transform 0.5s ease-in-out; -moz-transition: -moz-transform 0.5s ease-in-out; -o-transition: -o-transform 0.5s ease-in-out; transition: transform 0.5s ease-in-out; }
/* Direct chat message bubble; the :before/:after pseudo-elements draw the arrow pointing at the avatar */
.direct-chat-text { border-radius: 5px; position: relative; padding: 5px 10px; background: #d2d6de; border: 1px solid #d2d6de; margin: 5px 0 0 50px; color: #444444; }
.direct-chat-text:after,
.direct-chat-text:before { position: absolute; right: 100%; top: 15px; border: solid transparent; border-right-color: #d2d6de; content: ' '; height: 0; width: 0; pointer-events: none; }
.direct-chat-text:after { border-width: 5px; margin-top: -5px; }
.direct-chat-text:before { border-width: 6px; margin-top: -6px; }
/* Mirrored bubble for messages sent by the current user */
.right .direct-chat-text { margin-right: 50px; margin-left: 0; }
.right .direct-chat-text:after,
.right .direct-chat-text:before { right: auto; left: 100%; border-right-color: transparent; border-left-color: #d2d6de; }
.direct-chat-img { border-radius: 50%; float: left; width: 40px; height: 40px; }
.right .direct-chat-img { float: right; }
.direct-chat-info { display: block; margin-bottom: 2px; font-size: 12px; }
.direct-chat-name { font-weight: 600; }
.direct-chat-timestamp { color: #999; }
/* Contacts pane: parked off-canvas at translate(101%, 0), slid into view when opened */
.direct-chat-contacts-open .direct-chat-contacts { -webkit-transform: translate(0, 0); -ms-transform: translate(0, 0); -o-transform: translate(0, 0); transform: translate(0, 0); }
.direct-chat-contacts { -webkit-transform: translate(101%, 0); -ms-transform: translate(101%, 0); -o-transform: translate(101%, 0); transform: translate(101%, 0); position: absolute; top: 0; bottom: 0; height: 250px; width: 100%; background: #222d32; color: #fff; overflow: auto; }
/* Contacts list inside the pane */
.contacts-list > li { border-bottom: 1px solid rgba(0, 0, 0, 0.2); padding: 10px; margin: 0; }
.contacts-list > li:before,
.contacts-list > li:after { content: " "; display: table; }
.contacts-list > li:after { clear: both; }
.contacts-list > li:last-of-type { border-bottom: none; }
.contacts-list-img { border-radius: 50%; width: 40px; float: left; }
.contacts-list-info { margin-left: 45px; color: #fff; }
.contacts-list-name,
.contacts-list-status { display: block; }
.contacts-list-name { font-weight: 600; }
.contacts-list-status { font-size: 12px; } .contacts-list-date { color: #aaa; font-weight: normal; } .contacts-list-msg { color: #999; } .direct-chat-danger .right > .direct-chat-text { background: #dd4b39; border-color: #dd4b39; color: #ffffff; } .direct-chat-danger .right > .direct-chat-text:after, .direct-chat-danger .right > .direct-chat-text:before { border-left-color: #dd4b39; } .direct-chat-primary .right > .direct-chat-text { background: #3c8dbc; border-color: #3c8dbc; color: #ffffff; } .direct-chat-primary .right > .direct-chat-text:after, .direct-chat-primary .right > .direct-chat-text:before { border-left-color: #3c8dbc; } .direct-chat-warning .right > .direct-chat-text { background: #f39c12; border-color: #f39c12; color: #ffffff; } .direct-chat-warning .right > .direct-chat-text:after, .direct-chat-warning .right > .direct-chat-text:before { border-left-color: #f39c12; } .direct-chat-info .right > .direct-chat-text { background: #00c0ef; border-color: #00c0ef; color: #ffffff; } .direct-chat-info .right > .direct-chat-text:after, .direct-chat-info .right > .direct-chat-text:before { border-left-color: #00c0ef; } .direct-chat-success .right > .direct-chat-text { background: #00a65a; border-color: #00a65a; color: #ffffff; } .direct-chat-success .right > .direct-chat-text:after, .direct-chat-success .right > .direct-chat-text:before { border-left-color: #00a65a; } /* * Component: Users List * --------------------- */ .users-list > li { width: 25%; float: left; padding: 10px; text-align: center; } .users-list > li img { border-radius: 50%; max-width: 100%; height: auto; } .users-list > li > a:hover, .users-list > li > a:hover .users-list-name { color: #999; } .users-list-name, .users-list-date { display: block; } .users-list-name { font-weight: 600; color: #444; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; } .users-list-date { color: #999; font-size: 12px; } /* * Component: Carousel * ------------------- */ .carousel-control.left, 
.carousel-control.right { background-image: none; } .carousel-control > .fa { font-size: 40px; position: absolute; top: 50%; z-index: 5; display: inline-block; margin-top: -20px; } /* * Component: modal * ---------------- */ .modal { background: rgba(0, 0, 0, 0.3); } .modal-content { border-radius: 0; -webkit-box-shadow: 0 2px 3px rgba(0, 0, 0, 0.125); box-shadow: 0 2px 3px rgba(0, 0, 0, 0.125); border: 0; } @media (min-width: 768px) { .modal-content { -webkit-box-shadow: 0 2px 3px rgba(0, 0, 0, 0.125); box-shadow: 0 2px 3px rgba(0, 0, 0, 0.125); } } .modal-header { border-bottom-color: #f4f4f4; } .modal-footer { border-top-color: #f4f4f4; } .modal-primary .modal-header, .modal-primary .modal-footer { border-color: #307095; } .modal-warning .modal-header, .modal-warning .modal-footer { border-color: #c87f0a; } .modal-info .modal-header, .modal-info .modal-footer { border-color: #0097bc; } .modal-success .modal-header, .modal-success .modal-footer { border-color: #00733e; } .modal-danger .modal-header, .modal-danger .modal-footer { border-color: #c23321; } /* * Component: Social Widgets * ------------------------- */ .box-widget { border: none; position: relative; } .widget-user .widget-user-header { padding: 20px; height: 120px; border-top-right-radius: 3px; border-top-left-radius: 3px; } .widget-user .widget-user-username { margin-top: 0; margin-bottom: 5px; font-size: 25px; font-weight: 300; text-shadow: 0 1px 1px rgba(0, 0, 0, 0.2); } .widget-user .widget-user-desc { margin-top: 0; } .widget-user .widget-user-image { position: absolute; top: 65px; left: 50%; margin-left: -45px; } .widget-user .widget-user-image > img { width: 90px; height: auto; border: 3px solid #fff; } .widget-user .box-footer { padding-top: 30px; } .widget-user-2 .widget-user-header { padding: 20px; border-top-right-radius: 3px; border-top-left-radius: 3px; } .widget-user-2 .widget-user-username { margin-top: 5px; margin-bottom: 5px; font-size: 25px; font-weight: 300; } .widget-user-2 
.widget-user-desc { margin-top: 0; } .widget-user-2 .widget-user-username, .widget-user-2 .widget-user-desc { margin-left: 75px; } .widget-user-2 .widget-user-image > img { width: 65px; height: auto; float: left; } /* * Page: Mailbox * ------------- */ .mailbox-messages > .table { margin: 0; } .mailbox-controls { padding: 5px; } .mailbox-controls.with-border { border-bottom: 1px solid #f4f4f4; } .mailbox-read-info { border-bottom: 1px solid #f4f4f4; padding: 10px; } .mailbox-read-info h3 { font-size: 20px; margin: 0; } .mailbox-read-info h5 { margin: 0; padding: 5px 0 0 0; } .mailbox-read-time { color: #999; font-size: 13px; } .mailbox-read-message { padding: 10px; } .mailbox-attachments li { float: left; width: 200px; border: 1px solid #eee; margin-bottom: 10px; margin-right: 10px; } .mailbox-attachment-name { font-weight: bold; color: #666; } .mailbox-attachment-icon, .mailbox-attachment-info, .mailbox-attachment-size { display: block; } .mailbox-attachment-info { padding: 10px; background: #f4f4f4; } .mailbox-attachment-size { color: #999; font-size: 12px; } .mailbox-attachment-icon { text-align: center; font-size: 65px; color: #666; padding: 20px 10px; } .mailbox-attachment-icon.has-img { padding: 0; } .mailbox-attachment-icon.has-img > img { max-width: 100%; height: auto; } /* * Page: Lock Screen * ----------------- */ /* ADD THIS CLASS TO THE TAG */ .lockscreen { background: #d2d6de; } .lockscreen-logo { font-size: 35px; text-align: center; margin-bottom: 25px; font-weight: 300; } .lockscreen-logo a { color: #444; } .lockscreen-wrapper { max-width: 400px; margin: 0 auto; margin-top: 10%; } /* User name [optional] */ .lockscreen .lockscreen-name { text-align: center; font-weight: 600; } /* Will contain the image and the sign in form */ .lockscreen-item { border-radius: 4px; padding: 0; background: #fff; position: relative; margin: 10px auto 30px auto; width: 290px; } /* User image */ .lockscreen-image { border-radius: 50%; position: absolute; left: -10px; top: 
-25px; background: #fff; padding: 5px; z-index: 10; } .lockscreen-image > img { border-radius: 50%; width: 70px; height: 70px; } /* Contains the password input and the login button */ .lockscreen-credentials { margin-left: 70px; } .lockscreen-credentials .form-control { border: 0; } .lockscreen-credentials .btn { background-color: #fff; border: 0; padding: 0 10px; } .lockscreen-footer { margin-top: 10px; } /* * Page: Login & Register * ---------------------- */ .login-logo, .register-logo { font-size: 35px; text-align: center; margin-bottom: 25px; font-weight: 300; } .login-logo a, .register-logo a { color: #444; } .login-page, .register-page { background: #d2d6de; } .login-box, .register-box { width: 360px; margin: 7% auto; } @media (max-width: 768px) { .login-box, .register-box { width: 90%; margin-top: 20px; } } .login-box-body, .register-box-body { background: #fff; padding: 20px; border-top: 0; color: #666; } .login-box-body .form-control-feedback, .register-box-body .form-control-feedback { color: #777; } .login-box-msg, .register-box-msg { margin: 0; text-align: center; padding: 0 20px 20px 20px; } .social-auth-links { margin: 10px 0; } /* * Page: 400 and 500 error pages * ------------------------------ */ .error-page { width: 600px; margin: 20px auto 0 auto; } @media (max-width: 991px) { .error-page { width: 100%; } } .error-page > .headline { float: left; font-size: 100px; font-weight: 300; } @media (max-width: 991px) { .error-page > .headline { float: none; text-align: center; } } .error-page > .error-content { margin-left: 190px; display: block; } @media (max-width: 991px) { .error-page > .error-content { margin-left: 0; } } .error-page > .error-content > h3 { font-weight: 300; font-size: 25px; } @media (max-width: 991px) { .error-page > .error-content > h3 { text-align: center; } } /* * Page: Invoice * ------------- */ .invoice { position: relative; background: #fff; border: 1px solid #f4f4f4; padding: 20px; margin: 10px 25px; } .invoice-title { 
margin-top: 0; } /* * Page: Profile * ------------- */ .profile-user-img { margin: 0 auto; width: 100px; padding: 3px; border: 3px solid #d2d6de; } .profile-username { font-size: 21px; margin-top: 5px; } .post { border-bottom: 1px solid #d2d6de; margin-bottom: 15px; padding-bottom: 15px; color: #666; } .post:last-of-type { border-bottom: 0; margin-bottom: 0; padding-bottom: 0; } .post .user-block { margin-bottom: 15px; } /* * Social Buttons for Bootstrap * * Copyright 2013-2015 Panayiotis Lipiridis * Licensed under the MIT License * * https://github.com/lipis/bootstrap-social */ .btn-social { position: relative; padding-left: 44px; text-align: left; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } .btn-social > :first-child { position: absolute; left: 0; top: 0; bottom: 0; width: 32px; line-height: 34px; font-size: 1.6em; text-align: center; border-right: 1px solid rgba(0, 0, 0, 0.2); } .btn-social.btn-lg { padding-left: 61px; } .btn-social.btn-lg > :first-child { line-height: 45px; width: 45px; font-size: 1.8em; } .btn-social.btn-sm { padding-left: 38px; } .btn-social.btn-sm > :first-child { line-height: 28px; width: 28px; font-size: 1.4em; } .btn-social.btn-xs { padding-left: 30px; } .btn-social.btn-xs > :first-child { line-height: 20px; width: 20px; font-size: 1.2em; } .btn-social-icon { position: relative; padding-left: 44px; text-align: left; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; height: 34px; width: 34px; padding: 0; } .btn-social-icon > :first-child { position: absolute; left: 0; top: 0; bottom: 0; width: 32px; line-height: 34px; font-size: 1.6em; text-align: center; border-right: 1px solid rgba(0, 0, 0, 0.2); } .btn-social-icon.btn-lg { padding-left: 61px; } .btn-social-icon.btn-lg > :first-child { line-height: 45px; width: 45px; font-size: 1.8em; } .btn-social-icon.btn-sm { padding-left: 38px; } .btn-social-icon.btn-sm > :first-child { line-height: 28px; width: 28px; font-size: 1.4em; } .btn-social-icon.btn-xs { 
padding-left: 30px; } .btn-social-icon.btn-xs > :first-child { line-height: 20px; width: 20px; font-size: 1.2em; } .btn-social-icon > :first-child { border: none; text-align: center; width: 100%; } .btn-social-icon.btn-lg { height: 45px; width: 45px; padding-left: 0; padding-right: 0; } .btn-social-icon.btn-sm { height: 30px; width: 30px; padding-left: 0; padding-right: 0; } .btn-social-icon.btn-xs { height: 22px; width: 22px; padding-left: 0; padding-right: 0; } .btn-adn { color: #ffffff; background-color: #d87a68; border-color: rgba(0, 0, 0, 0.2); } .btn-adn:focus, .btn-adn.focus { color: #ffffff; background-color: #ce563f; border-color: rgba(0, 0, 0, 0.2); } .btn-adn:hover { color: #ffffff; background-color: #ce563f; border-color: rgba(0, 0, 0, 0.2); } .btn-adn:active, .btn-adn.active, .open > .dropdown-toggle.btn-adn { color: #ffffff; background-color: #ce563f; border-color: rgba(0, 0, 0, 0.2); } .btn-adn:active, .btn-adn.active, .open > .dropdown-toggle.btn-adn { background-image: none; } .btn-adn .badge { color: #d87a68; background-color: #ffffff; } .btn-bitbucket { color: #ffffff; background-color: #205081; border-color: rgba(0, 0, 0, 0.2); } .btn-bitbucket:focus, .btn-bitbucket.focus { color: #ffffff; background-color: #163758; border-color: rgba(0, 0, 0, 0.2); } .btn-bitbucket:hover { color: #ffffff; background-color: #163758; border-color: rgba(0, 0, 0, 0.2); } .btn-bitbucket:active, .btn-bitbucket.active, .open > .dropdown-toggle.btn-bitbucket { color: #ffffff; background-color: #163758; border-color: rgba(0, 0, 0, 0.2); } .btn-bitbucket:active, .btn-bitbucket.active, .open > .dropdown-toggle.btn-bitbucket { background-image: none; } .btn-bitbucket .badge { color: #205081; background-color: #ffffff; } .btn-dropbox { color: #ffffff; background-color: #1087dd; border-color: rgba(0, 0, 0, 0.2); } .btn-dropbox:focus, .btn-dropbox.focus { color: #ffffff; background-color: #0d6aad; border-color: rgba(0, 0, 0, 0.2); } .btn-dropbox:hover { color: #ffffff; 
background-color: #0d6aad; border-color: rgba(0, 0, 0, 0.2); } .btn-dropbox:active, .btn-dropbox.active, .open > .dropdown-toggle.btn-dropbox { color: #ffffff; background-color: #0d6aad; border-color: rgba(0, 0, 0, 0.2); } .btn-dropbox:active, .btn-dropbox.active, .open > .dropdown-toggle.btn-dropbox { background-image: none; } .btn-dropbox .badge { color: #1087dd; background-color: #ffffff; } .btn-facebook { color: #ffffff; background-color: #3b5998; border-color: rgba(0, 0, 0, 0.2); } .btn-facebook:focus, .btn-facebook.focus { color: #ffffff; background-color: #2d4373; border-color: rgba(0, 0, 0, 0.2); } .btn-facebook:hover { color: #ffffff; background-color: #2d4373; border-color: rgba(0, 0, 0, 0.2); } .btn-facebook:active, .btn-facebook.active, .open > .dropdown-toggle.btn-facebook { color: #ffffff; background-color: #2d4373; border-color: rgba(0, 0, 0, 0.2); } .btn-facebook:active, .btn-facebook.active, .open > .dropdown-toggle.btn-facebook { background-image: none; } .btn-facebook .badge { color: #3b5998; background-color: #ffffff; } .btn-flickr { color: #ffffff; background-color: #ff0084; border-color: rgba(0, 0, 0, 0.2); } .btn-flickr:focus, .btn-flickr.focus { color: #ffffff; background-color: #cc006a; border-color: rgba(0, 0, 0, 0.2); } .btn-flickr:hover { color: #ffffff; background-color: #cc006a; border-color: rgba(0, 0, 0, 0.2); } .btn-flickr:active, .btn-flickr.active, .open > .dropdown-toggle.btn-flickr { color: #ffffff; background-color: #cc006a; border-color: rgba(0, 0, 0, 0.2); } .btn-flickr:active, .btn-flickr.active, .open > .dropdown-toggle.btn-flickr { background-image: none; } .btn-flickr .badge { color: #ff0084; background-color: #ffffff; } .btn-foursquare { color: #ffffff; background-color: #f94877; border-color: rgba(0, 0, 0, 0.2); } .btn-foursquare:focus, .btn-foursquare.focus { color: #ffffff; background-color: #f71752; border-color: rgba(0, 0, 0, 0.2); } .btn-foursquare:hover { color: #ffffff; background-color: #f71752; border-color: 
rgba(0, 0, 0, 0.2); } .btn-foursquare:active, .btn-foursquare.active, .open > .dropdown-toggle.btn-foursquare { color: #ffffff; background-color: #f71752; border-color: rgba(0, 0, 0, 0.2); } .btn-foursquare:active, .btn-foursquare.active, .open > .dropdown-toggle.btn-foursquare { background-image: none; } .btn-foursquare .badge { color: #f94877; background-color: #ffffff; } .btn-github { color: #ffffff; background-color: #444444; border-color: rgba(0, 0, 0, 0.2); } .btn-github:focus, .btn-github.focus { color: #ffffff; background-color: #2b2b2b; border-color: rgba(0, 0, 0, 0.2); } .btn-github:hover { color: #ffffff; background-color: #2b2b2b; border-color: rgba(0, 0, 0, 0.2); } .btn-github:active, .btn-github.active, .open > .dropdown-toggle.btn-github { color: #ffffff; background-color: #2b2b2b; border-color: rgba(0, 0, 0, 0.2); } .btn-github:active, .btn-github.active, .open > .dropdown-toggle.btn-github { background-image: none; } .btn-github .badge { color: #444444; background-color: #ffffff; } .btn-google { color: #ffffff; background-color: #dd4b39; border-color: rgba(0, 0, 0, 0.2); } .btn-google:focus, .btn-google.focus { color: #ffffff; background-color: #c23321; border-color: rgba(0, 0, 0, 0.2); } .btn-google:hover { color: #ffffff; background-color: #c23321; border-color: rgba(0, 0, 0, 0.2); } .btn-google:active, .btn-google.active, .open > .dropdown-toggle.btn-google { color: #ffffff; background-color: #c23321; border-color: rgba(0, 0, 0, 0.2); } .btn-google:active, .btn-google.active, .open > .dropdown-toggle.btn-google { background-image: none; } .btn-google .badge { color: #dd4b39; background-color: #ffffff; } .btn-instagram { color: #ffffff; background-color: #3f729b; border-color: rgba(0, 0, 0, 0.2); } .btn-instagram:focus, .btn-instagram.focus { color: #ffffff; background-color: #305777; border-color: rgba(0, 0, 0, 0.2); } .btn-instagram:hover { color: #ffffff; background-color: #305777; border-color: rgba(0, 0, 0, 0.2); } .btn-instagram:active, 
.btn-instagram.active, .open > .dropdown-toggle.btn-instagram { color: #ffffff; background-color: #305777; border-color: rgba(0, 0, 0, 0.2); } .btn-instagram:active, .btn-instagram.active, .open > .dropdown-toggle.btn-instagram { background-image: none; } .btn-instagram .badge { color: #3f729b; background-color: #ffffff; } .btn-linkedin { color: #ffffff; background-color: #007bb6; border-color: rgba(0, 0, 0, 0.2); } .btn-linkedin:focus, .btn-linkedin.focus { color: #ffffff; background-color: #005983; border-color: rgba(0, 0, 0, 0.2); } .btn-linkedin:hover { color: #ffffff; background-color: #005983; border-color: rgba(0, 0, 0, 0.2); } .btn-linkedin:active, .btn-linkedin.active, .open > .dropdown-toggle.btn-linkedin { color: #ffffff; background-color: #005983; border-color: rgba(0, 0, 0, 0.2); } .btn-linkedin:active, .btn-linkedin.active, .open > .dropdown-toggle.btn-linkedin { background-image: none; } .btn-linkedin .badge { color: #007bb6; background-color: #ffffff; } .btn-microsoft { color: #ffffff; background-color: #2672ec; border-color: rgba(0, 0, 0, 0.2); } .btn-microsoft:focus, .btn-microsoft.focus { color: #ffffff; background-color: #125acd; border-color: rgba(0, 0, 0, 0.2); } .btn-microsoft:hover { color: #ffffff; background-color: #125acd; border-color: rgba(0, 0, 0, 0.2); } .btn-microsoft:active, .btn-microsoft.active, .open > .dropdown-toggle.btn-microsoft { color: #ffffff; background-color: #125acd; border-color: rgba(0, 0, 0, 0.2); } .btn-microsoft:active, .btn-microsoft.active, .open > .dropdown-toggle.btn-microsoft { background-image: none; } .btn-microsoft .badge { color: #2672ec; background-color: #ffffff; } .btn-openid { color: #ffffff; background-color: #f7931e; border-color: rgba(0, 0, 0, 0.2); } .btn-openid:focus, .btn-openid.focus { color: #ffffff; background-color: #da7908; border-color: rgba(0, 0, 0, 0.2); } .btn-openid:hover { color: #ffffff; background-color: #da7908; border-color: rgba(0, 0, 0, 0.2); } .btn-openid:active, 
.btn-openid.active, .open > .dropdown-toggle.btn-openid { color: #ffffff; background-color: #da7908; border-color: rgba(0, 0, 0, 0.2); } .btn-openid:active, .btn-openid.active, .open > .dropdown-toggle.btn-openid { background-image: none; } .btn-openid .badge { color: #f7931e; background-color: #ffffff; } .btn-pinterest { color: #ffffff; background-color: #cb2027; border-color: rgba(0, 0, 0, 0.2); } .btn-pinterest:focus, .btn-pinterest.focus { color: #ffffff; background-color: #9f191f; border-color: rgba(0, 0, 0, 0.2); } .btn-pinterest:hover { color: #ffffff; background-color: #9f191f; border-color: rgba(0, 0, 0, 0.2); } .btn-pinterest:active, .btn-pinterest.active, .open > .dropdown-toggle.btn-pinterest { color: #ffffff; background-color: #9f191f; border-color: rgba(0, 0, 0, 0.2); } .btn-pinterest:active, .btn-pinterest.active, .open > .dropdown-toggle.btn-pinterest { background-image: none; } .btn-pinterest .badge { color: #cb2027; background-color: #ffffff; } .btn-reddit { color: #000000; background-color: #eff7ff; border-color: rgba(0, 0, 0, 0.2); } .btn-reddit:focus, .btn-reddit.focus { color: #000000; background-color: #bcddff; border-color: rgba(0, 0, 0, 0.2); } .btn-reddit:hover { color: #000000; background-color: #bcddff; border-color: rgba(0, 0, 0, 0.2); } .btn-reddit:active, .btn-reddit.active, .open > .dropdown-toggle.btn-reddit { color: #000000; background-color: #bcddff; border-color: rgba(0, 0, 0, 0.2); } .btn-reddit:active, .btn-reddit.active, .open > .dropdown-toggle.btn-reddit { background-image: none; } .btn-reddit .badge { color: #eff7ff; background-color: #000000; } .btn-soundcloud { color: #ffffff; background-color: #ff5500; border-color: rgba(0, 0, 0, 0.2); } .btn-soundcloud:focus, .btn-soundcloud.focus { color: #ffffff; background-color: #cc4400; border-color: rgba(0, 0, 0, 0.2); } .btn-soundcloud:hover { color: #ffffff; background-color: #cc4400; border-color: rgba(0, 0, 0, 0.2); } .btn-soundcloud:active, .btn-soundcloud.active, .open > 
/* Bootstrap-Social button variants: each network gets a base color, a darker
   shade on :focus/:hover/:active, and an inverse-colored .badge. The first
   selector list continues from the previous line (soundcloud active state). */
.dropdown-toggle.btn-soundcloud { color: #ffffff; background-color: #cc4400; border-color: rgba(0, 0, 0, 0.2); } .btn-soundcloud:active, .btn-soundcloud.active, .open > .dropdown-toggle.btn-soundcloud { background-image: none; } .btn-soundcloud .badge { color: #ff5500; background-color: #ffffff; } .btn-tumblr { color: #ffffff; background-color: #2c4762; border-color: rgba(0, 0, 0, 0.2); } .btn-tumblr:focus, .btn-tumblr.focus { color: #ffffff; background-color: #1c2d3f; border-color: rgba(0, 0, 0, 0.2); } .btn-tumblr:hover { color: #ffffff; background-color: #1c2d3f; border-color: rgba(0, 0, 0, 0.2); } .btn-tumblr:active, .btn-tumblr.active, .open > .dropdown-toggle.btn-tumblr { color: #ffffff; background-color: #1c2d3f; border-color: rgba(0, 0, 0, 0.2); } .btn-tumblr:active, .btn-tumblr.active, .open > .dropdown-toggle.btn-tumblr { background-image: none; } .btn-tumblr .badge { color: #2c4762; background-color: #ffffff; } .btn-twitter { color: #ffffff; background-color: #55acee; border-color: rgba(0, 0, 0, 0.2); } .btn-twitter:focus, .btn-twitter.focus { color: #ffffff; background-color: #2795e9; border-color: rgba(0, 0, 0, 0.2); } .btn-twitter:hover { color: #ffffff; background-color: #2795e9; border-color: rgba(0, 0, 0, 0.2); } .btn-twitter:active, .btn-twitter.active, .open > .dropdown-toggle.btn-twitter { color: #ffffff; background-color: #2795e9; border-color: rgba(0, 0, 0, 0.2); } .btn-twitter:active, .btn-twitter.active, .open > .dropdown-toggle.btn-twitter { background-image: none; } .btn-twitter .badge { color: #55acee; background-color: #ffffff; } .btn-vimeo { color: #ffffff; background-color: #1ab7ea; border-color: rgba(0, 0, 0, 0.2); } .btn-vimeo:focus, .btn-vimeo.focus { color: #ffffff; background-color: #1295bf; border-color: rgba(0, 0, 0, 0.2); } .btn-vimeo:hover { color: #ffffff; background-color: #1295bf; border-color: rgba(0, 0, 0, 0.2); } .btn-vimeo:active, .btn-vimeo.active, .open > .dropdown-toggle.btn-vimeo { color: #ffffff; background-color:
/* vimeo active value continues; vk and yahoo variants follow, then the
   Full Calendar plugin section header. */
#1295bf; border-color: rgba(0, 0, 0, 0.2); } .btn-vimeo:active, .btn-vimeo.active, .open > .dropdown-toggle.btn-vimeo { background-image: none; } .btn-vimeo .badge { color: #1ab7ea; background-color: #ffffff; } .btn-vk { color: #ffffff; background-color: #587ea3; border-color: rgba(0, 0, 0, 0.2); } .btn-vk:focus, .btn-vk.focus { color: #ffffff; background-color: #466482; border-color: rgba(0, 0, 0, 0.2); } .btn-vk:hover { color: #ffffff; background-color: #466482; border-color: rgba(0, 0, 0, 0.2); } .btn-vk:active, .btn-vk.active, .open > .dropdown-toggle.btn-vk { color: #ffffff; background-color: #466482; border-color: rgba(0, 0, 0, 0.2); } .btn-vk:active, .btn-vk.active, .open > .dropdown-toggle.btn-vk { background-image: none; } .btn-vk .badge { color: #587ea3; background-color: #ffffff; } .btn-yahoo { color: #ffffff; background-color: #720e9e; border-color: rgba(0, 0, 0, 0.2); } .btn-yahoo:focus, .btn-yahoo.focus { color: #ffffff; background-color: #500a6f; border-color: rgba(0, 0, 0, 0.2); } .btn-yahoo:hover { color: #ffffff; background-color: #500a6f; border-color: rgba(0, 0, 0, 0.2); } .btn-yahoo:active, .btn-yahoo.active, .open > .dropdown-toggle.btn-yahoo { color: #ffffff; background-color: #500a6f; border-color: rgba(0, 0, 0, 0.2); } .btn-yahoo:active, .btn-yahoo.active, .open > .dropdown-toggle.btn-yahoo { background-image: none; } .btn-yahoo .badge { color: #720e9e; background-color: #ffffff; } /* * Plugin: Full Calendar * --------------------- */ .fc-button { background: #f4f4f4; background-image: none; color: #444; border-color: #ddd; border-bottom-color: #ddd; } .fc-button:hover, .fc-button:active, .fc-button.hover { background-color: #e9e9e9; } .fc-header-title h2 { font-size: 15px; line-height: 1.6em; color: #666; margin-left: 10px; } .fc-header-right { padding-right: 10px; } .fc-header-left { padding-left: 10px; } .fc-widget-header { background: #fafafa; } .fc-grid { width: 100%; border: 0; } .fc-widget-header:first-of-type,
/* Full Calendar grid borders/toolbar/day-number, the draggable color picker
   and .external-event cards, then the Select2 plugin theme. The leading
   selector list continues from the previous line. */
.fc-widget-content:first-of-type { border-left: 0; border-right: 0; } .fc-widget-header:last-of-type, .fc-widget-content:last-of-type { border-right: 0; } .fc-toolbar { padding: 10px; margin: 0; } .fc-day-number { font-size: 20px; font-weight: 300; padding-right: 10px; } .fc-color-picker { list-style: none; margin: 0; padding: 0; } .fc-color-picker > li { float: left; font-size: 30px; margin-right: 5px; line-height: 30px; } .fc-color-picker > li .fa { -webkit-transition: -webkit-transform linear 0.3s; -moz-transition: -moz-transform linear 0.3s; -o-transition: -o-transform linear 0.3s; transition: transform linear 0.3s; } .fc-color-picker > li .fa:hover { -webkit-transform: rotate(30deg); -ms-transform: rotate(30deg); -o-transform: rotate(30deg); transform: rotate(30deg); } #add-new-event { -webkit-transition: all linear 0.3s; -o-transition: all linear 0.3s; transition: all linear 0.3s; } .external-event { padding: 5px 10px; font-weight: bold; margin-bottom: 4px; box-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); text-shadow: 0 1px 1px rgba(0, 0, 0, 0.1); border-radius: 3px; cursor: move; } .external-event:hover { box-shadow: inset 0 0 90px rgba(0, 0, 0, 0.2); } /* * Plugin: Select2 * --------------- */ .select2-container--default.select2-container--focus, .select2-selection.select2-container--focus, .select2-container--default:focus, .select2-selection:focus, .select2-container--default:active, .select2-selection:active { outline: none; } .select2-container--default .select2-selection--single, .select2-selection .select2-selection--single { border: 1px solid #d2d6de; border-radius: 0; padding: 6px 12px; height: 34px; } .select2-container--default.select2-container--open { border-color: #3c8dbc; } .select2-dropdown { border: 1px solid #d2d6de; border-radius: 0; } .select2-container--default .select2-results__option--highlighted[aria-selected] { background-color: #3c8dbc; color: white; }
/* FIX: vendor-prefixed user-select forms must precede the unprefixed
   standard property so the standard form wins the cascade (the original
   declared `user-select` before `-webkit-user-select`); -moz-/-ms- prefixes
   added for parity with the other prefixed declarations in this file. */
.select2-results__option { padding: 6px 12px; -webkit-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .select2-container .select2-selection--single .select2-selection__rendered { padding-left: 0; padding-right: 0; height: auto; margin-top: -4px; } .select2-container[dir="rtl"] .select2-selection--single .select2-selection__rendered { padding-right: 6px; padding-left: 20px; } .select2-container--default .select2-selection--single .select2-selection__arrow { height: 28px; right: 3px; } .select2-container--default .select2-selection--single .select2-selection__arrow b { margin-top: 0; } .select2-dropdown .select2-search__field, .select2-search--inline .select2-search__field { border: 1px solid #d2d6de; } .select2-dropdown .select2-search__field:focus, .select2-search--inline .select2-search__field:focus { outline: none; border: 1px solid #3c8dbc; } .select2-container--default .select2-results__option[aria-disabled=true] { color: #999; } .select2-container--default .select2-results__option[aria-selected=true] { background-color: #ddd; } .select2-container--default .select2-results__option[aria-selected=true], .select2-container--default .select2-results__option[aria-selected=true]:hover { color: #444; } .select2-container--default .select2-selection--multiple { border: 1px solid #d2d6de; border-radius: 0; } .select2-container--default .select2-selection--multiple:focus { border-color: #3c8dbc; } .select2-container--default.select2-container--focus .select2-selection--multiple { border-color: #d2d6de; } .select2-container--default .select2-selection--multiple .select2-selection__choice { background-color: #3c8dbc; border-color: #367fa9; padding: 1px 10px; color: #fff; } .select2-container--default .select2-selection--multiple .select2-selection__choice__remove { margin-right: 5px; color: rgba(255, 255, 255, 0.7); } .select2-container--default .select2-selection--multiple .select2-selection__choice__remove:hover { color: #fff; } .select2-container .select2-selection--single .select2-selection__rendered { padding-right: 10px; } /* * General: Miscellaneous *
---------------------- */ .pad { padding: 10px; } .margin { margin: 10px; } .margin-bottom { margin-bottom: 20px; } .margin-bottom-none { margin-bottom: 0; } .margin-r-5 { margin-right: 5px; } .inline { display: inline; } .description-block { display: block; margin: 10px 0; text-align: center; } .description-block.margin-bottom { margin-bottom: 25px; } .description-block > .description-header { margin: 0; padding: 0; font-weight: 600; font-size: 16px; } .description-block > .description-text { text-transform: uppercase; } .bg-red, .bg-yellow, .bg-aqua, .bg-blue, .bg-light-blue, .bg-green, .bg-navy, .bg-teal, .bg-olive, .bg-lime, .bg-orange, .bg-fuchsia, .bg-purple, .bg-maroon, .bg-black, .bg-red-active, .bg-yellow-active, .bg-aqua-active, .bg-blue-active, .bg-light-blue-active, .bg-green-active, .bg-navy-active, .bg-teal-active, .bg-olive-active, .bg-lime-active, .bg-orange-active, .bg-fuchsia-active, .bg-purple-active, .bg-maroon-active, .bg-black-active, .callout.callout-danger, .callout.callout-warning, .callout.callout-info, .callout.callout-success, .alert-success, .alert-danger, .alert-error, .alert-warning, .alert-info, .label-danger, .label-info, .label-warning, .label-primary, .label-success, .modal-primary .modal-body, .modal-primary .modal-header, .modal-primary .modal-footer, .modal-warning .modal-body, .modal-warning .modal-header, .modal-warning .modal-footer, .modal-info .modal-body, .modal-info .modal-header, .modal-info .modal-footer, .modal-success .modal-body, .modal-success .modal-header, .modal-success .modal-footer, .modal-danger .modal-body, .modal-danger .modal-header, .modal-danger .modal-footer { color: #fff !important; } .bg-gray { color: #000; background-color: #d2d6de !important; } .bg-gray-light { background-color: #f7f7f7; } .bg-black { background-color: #111111 !important; } .bg-red, .callout.callout-danger, .alert-danger, .alert-error, .label-danger, .modal-danger .modal-body { background-color: #dd4b39 !important; } .bg-yellow,
/* Contextual background palette: each .bg-<color> utility is grouped with
   the matching callout/alert/label/modal selectors; *-active variants below
   use a darker shade of the same hue. All use !important. */
.callout.callout-warning, .alert-warning, .label-warning, .modal-warning .modal-body { background-color: #f39c12 !important; } .bg-aqua, .callout.callout-info, .alert-info, .label-info, .modal-info .modal-body { background-color: #00c0ef !important; } .bg-blue { background-color: #0073b7 !important; } .bg-light-blue, .label-primary, .modal-primary .modal-body { background-color: #3c8dbc !important; } .bg-green, .callout.callout-success, .alert-success, .label-success, .modal-success .modal-body { background-color: #00a65a !important; } .bg-navy { background-color: #001f3f !important; } .bg-teal { background-color: #39cccc !important; } .bg-olive { background-color: #3d9970 !important; } .bg-lime { background-color: #01ff70 !important; } .bg-orange { background-color: #ff851b !important; } .bg-fuchsia { background-color: #f012be !important; } .bg-purple { background-color: #605ca8 !important; } .bg-maroon { background-color: #d81b60 !important; } .bg-gray-active { color: #000; background-color: #b5bbc8 !important; } .bg-black-active { background-color: #000000 !important; } .bg-red-active, .modal-danger .modal-header, .modal-danger .modal-footer { background-color: #d33724 !important; } .bg-yellow-active, .modal-warning .modal-header, .modal-warning .modal-footer { background-color: #db8b0b !important; } .bg-aqua-active, .modal-info .modal-header, .modal-info .modal-footer { background-color: #00a7d0 !important; } .bg-blue-active { background-color: #005384 !important; } .bg-light-blue-active, .modal-primary .modal-header, .modal-primary .modal-footer { background-color: #357ca5 !important; } .bg-green-active, .modal-success .modal-header, .modal-success .modal-footer { background-color: #008d4c !important; } .bg-navy-active { background-color: #001a35 !important; } .bg-teal-active { background-color: #30bbbb !important; } .bg-olive-active { background-color: #368763 !important; } .bg-lime-active { background-color: #00e765 !important; } .bg-orange-active {
background-color: #ff7701 !important; } .bg-fuchsia-active { background-color: #db0ead !important; } .bg-purple-active { background-color: #555299 !important; } .bg-maroon-active { background-color: #ca195a !important; } [class^="bg-"].disabled { opacity: 0.65; filter: alpha(opacity=65); } .text-red { color: #dd4b39 !important; } .text-yellow { color: #f39c12 !important; } .text-aqua { color: #00c0ef !important; } .text-blue { color: #0073b7 !important; } .text-black { color: #111111 !important; } .text-light-blue { color: #3c8dbc !important; } .text-green { color: #00a65a !important; } .text-gray { color: #d2d6de !important; } .text-navy { color: #001f3f !important; } .text-teal { color: #39cccc !important; } .text-olive { color: #3d9970 !important; } .text-lime { color: #01ff70 !important; } .text-orange { color: #ff851b !important; } .text-fuchsia { color: #f012be !important; } .text-purple { color: #605ca8 !important; } .text-maroon { color: #d81b60 !important; } .link-muted { color: #7a869d; } .link-muted:hover, .link-muted:focus { color: #606c84; } .link-black { color: #666; } .link-black:hover, .link-black:focus { color: #999; } .hide { display: none !important; } .no-border { border: 0 !important; } .no-padding { padding: 0 !important; } .no-margin { margin: 0 !important; } .no-shadow { box-shadow: none !important; } .list-unstyled, .chart-legend, .contacts-list, .users-list, .mailbox-attachments { list-style: none; margin: 0; padding: 0; } .list-group-unbordered > .list-group-item { border-left: 0; border-right: 0; border-radius: 0; padding-left: 0; padding-right: 0; } .flat { border-radius: 0 !important; } .text-bold, .text-bold.table td, .text-bold.table th { font-weight: 700; } .text-sm { font-size: 12px; } .jqstooltip { padding: 5px !important; width: auto !important; height: auto !important; } .bg-teal-gradient { background: #39cccc !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #39cccc), color-stop(1, #7adddd)) 
/* Cross-browser gradient stacks: each .bg-*-gradient declares a plain-color
   fallback, then -webkit-/-ms-/-moz-/-o- gradient forms and an IE
   DXImageTransform filter. Later declarations override on capable engines. */
!important; background: -ms-linear-gradient(bottom, #39cccc, #7adddd) !important; background: -moz-linear-gradient(center bottom, #39cccc 0%, #7adddd 100%) !important; background: -o-linear-gradient(#7adddd, #39cccc) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#7adddd', endColorstr='#39cccc', GradientType=0) !important; color: #fff; } .bg-light-blue-gradient { background: #3c8dbc !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #3c8dbc), color-stop(1, #67a8ce)) !important; background: -ms-linear-gradient(bottom, #3c8dbc, #67a8ce) !important; background: -moz-linear-gradient(center bottom, #3c8dbc 0%, #67a8ce 100%) !important; background: -o-linear-gradient(#67a8ce, #3c8dbc) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#67a8ce', endColorstr='#3c8dbc', GradientType=0) !important; color: #fff; } .bg-blue-gradient { background: #0073b7 !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #0073b7), color-stop(1, #0089db)) !important; background: -ms-linear-gradient(bottom, #0073b7, #0089db) !important; background: -moz-linear-gradient(center bottom, #0073b7 0%, #0089db 100%) !important; background: -o-linear-gradient(#0089db, #0073b7) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#0089db', endColorstr='#0073b7', GradientType=0) !important; color: #fff; } .bg-aqua-gradient { background: #00c0ef !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #00c0ef), color-stop(1, #14d1ff)) !important; background: -ms-linear-gradient(bottom, #00c0ef, #14d1ff) !important; background: -moz-linear-gradient(center bottom, #00c0ef 0%, #14d1ff 100%) !important; background: -o-linear-gradient(#14d1ff, #00c0ef) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#14d1ff', endColorstr='#00c0ef', GradientType=0) !important; color: #fff; } .bg-yellow-gradient {
background: #f39c12 !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #f39c12), color-stop(1, #f7bc60)) !important; background: -ms-linear-gradient(bottom, #f39c12, #f7bc60) !important; background: -moz-linear-gradient(center bottom, #f39c12 0%, #f7bc60 100%) !important; background: -o-linear-gradient(#f7bc60, #f39c12) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#f7bc60', endColorstr='#f39c12', GradientType=0) !important; color: #fff; } .bg-purple-gradient { background: #605ca8 !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #605ca8), color-stop(1, #9491c4)) !important; background: -ms-linear-gradient(bottom, #605ca8, #9491c4) !important; background: -moz-linear-gradient(center bottom, #605ca8 0%, #9491c4 100%) !important; background: -o-linear-gradient(#9491c4, #605ca8) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#9491c4', endColorstr='#605ca8', GradientType=0) !important; color: #fff; } .bg-green-gradient { background: #00a65a !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #00a65a), color-stop(1, #00ca6d)) !important; background: -ms-linear-gradient(bottom, #00a65a, #00ca6d) !important; background: -moz-linear-gradient(center bottom, #00a65a 0%, #00ca6d 100%) !important; background: -o-linear-gradient(#00ca6d, #00a65a) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00ca6d', endColorstr='#00a65a', GradientType=0) !important; color: #fff; } .bg-red-gradient { background: #dd4b39 !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #dd4b39), color-stop(1, #e47365)) !important; background: -ms-linear-gradient(bottom, #dd4b39, #e47365) !important; background: -moz-linear-gradient(center bottom, #dd4b39 0%, #e47365 100%) !important; background: -o-linear-gradient(#e47365, #dd4b39) !important; filter:
progid:DXImageTransform.Microsoft.gradient(startColorstr='#e47365', endColorstr='#dd4b39', GradientType=0) !important; color: #fff; } .bg-black-gradient { background: #111111 !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #111111), color-stop(1, #2b2b2b)) !important; background: -ms-linear-gradient(bottom, #111111, #2b2b2b) !important; background: -moz-linear-gradient(center bottom, #111111 0%, #2b2b2b 100%) !important; background: -o-linear-gradient(#2b2b2b, #111111) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#2b2b2b', endColorstr='#111111', GradientType=0) !important; color: #fff; } .bg-maroon-gradient { background: #d81b60 !important; background: -webkit-gradient(linear, left bottom, left top, color-stop(0, #d81b60), color-stop(1, #e73f7c)) !important; background: -ms-linear-gradient(bottom, #d81b60, #e73f7c) !important; background: -moz-linear-gradient(center bottom, #d81b60 0%, #e73f7c 100%) !important; background: -o-linear-gradient(#e73f7c, #d81b60) !important; filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#e73f7c', endColorstr='#d81b60', GradientType=0) !important; color: #fff; } .description-block .description-icon { font-size: 16px; } .no-pad-top { padding-top: 0; } .position-static { position: static !important; } .list-header { font-size: 15px; padding: 10px 4px; font-weight: bold; color: #666; } .list-seperator { height: 1px; background: #f4f4f4; margin: 15px 0 9px 0; } .list-link > a { padding: 4px; color: #777; } .list-link > a:hover { color: #222; } .font-light { font-weight: 300; } .user-block:before, .user-block:after { content: " "; display: table; } .user-block:after { clear: both; } .user-block img { width: 40px; height: 40px; float: left; } .user-block .username, .user-block .description, .user-block .comment { display: block; margin-left: 50px; } .user-block .username { font-size: 16px; font-weight: 600; } .user-block .description { color: #999;
/* .user-block sizing continues; avatar helpers (.img-sm/md/lg + .img-push
   offsets), bordered image and attachment blocks, sortable/chart helpers,
   and the print media overrides — followed by the packed filebox.css file
   and the start of flotconfig.css. */
font-size: 13px; } .user-block.user-block-sm .username, .user-block.user-block-sm .description, .user-block.user-block-sm .comment { margin-left: 40px; } .user-block.user-block-sm .username { font-size: 14px; } .img-sm, .img-md, .img-lg, .box-comments .box-comment img, .user-block.user-block-sm img { float: left; } .img-sm, .box-comments .box-comment img, .user-block.user-block-sm img { width: 30px !important; height: 30px !important; } .img-sm + .img-push { margin-left: 40px; } .img-md { width: 60px; height: 60px; } .img-md + .img-push { margin-left: 70px; } .img-lg { width: 100px; height: 100px; } .img-lg + .img-push { margin-left: 110px; } .img-bordered { border: 3px solid #d2d6de; padding: 3px; } .img-bordered-sm { border: 2px solid #d2d6de; padding: 2px; } .attachment-block { border: 1px solid #f4f4f4; padding: 5px; margin-bottom: 10px; background: #f7f7f7; } .attachment-block .attachment-img { max-width: 100px; max-height: 100px; height: auto; float: left; } .attachment-block .attachment-pushed { margin-left: 110px; } .attachment-block .attachment-heading { margin: 0; } .attachment-block .attachment-text { color: #555; } .connectedSortable { min-height: 100px; } .ui-helper-hidden-accessible { border: 0; clip: rect(0 0 0 0); height: 1px; margin: -1px; overflow: hidden; padding: 0; position: absolute; width: 1px; } .sort-highlight { background: #f4f4f4; border: 1px dashed #ddd; margin-bottom: 10px; } .full-opacity-hover { opacity: 0.65; filter: alpha(opacity=65); } .full-opacity-hover:hover { opacity: 1; filter: alpha(opacity=100); } .chart { position: relative; overflow: hidden; width: 100%; } .chart svg, .chart canvas { width: 100% !important; } /* * Misc: print * ----------- */ @media print { .no-print, .main-sidebar, .left-side, .main-header, .content-header { display: none !important; } .content-wrapper, .right-side, .main-footer { margin-left: 0 !important; min-height: 0 !important; -webkit-transform: translate(0, 0) !important; -ms-transform:
translate(0, 0) !important; -o-transform: translate(0, 0) !important; transform: translate(0, 0) !important; } .fixed .content-wrapper, .fixed .right-side { padding-top: 0 !important; } .invoice { width: 100%; border: 0; margin: 0; padding: 0; } .invoice-col { float: left; width: 33.3333333%; } .table-responsive { overflow: auto; } .table-responsive > .table tr th, .table-responsive > .table tr td { white-space: normal !important; } } ================================================ FILE: web/static/dist/css/filebox.css ================================================ .file-box { float: left; width: 220px; } .file { border: 1px solid #e7eaec; padding: 0; background-color: #ffffff; position: relative; margin-bottom: 20px; margin-right: 20px; } .file .icon { padding: 15px 10px; text-align: center; } .file .icon i { font-size: 70px; color: #dadada; } .file .file-name { padding: 10px; background-color: #f8f8f8; border-top: 1px solid #e7eaec; } .corner { position: absolute; display: inline-block; width: 0; height: 0; line-height: 0; border: 0.6em solid transparent; border-right: 0.6em solid #f1f1f1; border-bottom: 0.6em solid #f1f1f1; right: 0em; bottom: 0em; } a:hover{ text-decoration:none; } ================================================ FILE: web/static/dist/css/flotconfig.css ================================================ /* FLOT CHART */ .flot-chart { display: block; height: 200px; } .widget .flot-chart.dashboard-chart { display: block; height: 120px; margin-top: 40px; } .flot-chart.dashboard-chart { display: block; height: 180px; margin-top: 40px; } .flot-chart-content { width: 100%; height: 100%; } .flot-chart-pie-content { width: 200px; height: 200px; margin: auto; } .jqstooltip { position: absolute; display: block; left: 0px; top: 0px; visibility: hidden; background: #2b303a; background-color: rgba(43, 48, 58, 0.8); color: white; text-align: left; white-space: nowrap; z-index: 10000; padding: 5px 5px 5px 5px; min-height: 22px; border-radius: 3px; }
.jqsfield { color: white; text-align: left; } .h-200 { min-height: 200px; } .legendLabel { padding-left: 5px; } .stat-list li:first-child { margin-top: 0; } .stat-list { list-style: none; padding: 0; margin: 0; } .stat-percent { float: right; } .stat-list li { margin-top: 15px; position: relative; } ================================================ FILE: web/static/dist/css/modalconfig.css ================================================ /* MODAL */ .modal-content { background-clip: padding-box; background-color: #FFFFFF; border: 1px solid rgba(0, 0, 0, 0); border-radius: 4px; box-shadow: 0 1px 3px rgba(0, 0, 0, 0.3); outline: 0 none; position: relative; } .modal-dialog { z-index: 2200; } .modal-body { padding: 20px 30px 30px 30px; } .inmodal .modal-body { background: #f8fafb; } .inmodal .modal-header { padding: 30px 15px; text-align: center; } .animated.modal.fade .modal-dialog { -webkit-transform: none; -ms-transform: none; -o-transform: none; transform: none; } .inmodal .modal-title { font-size: 26px; } .inmodal .modal-icon { font-size: 84px; color: #e2e3e3; } .modal-footer { margin-top: 0; } ================================================ FILE: web/static/dist/css/skins/_all-skins.css ================================================ /* * Skin: Blue * ---------- */ .skin-blue .main-header .navbar { background-color: #3c8dbc; } .skin-blue .main-header .navbar .nav > li > a { color: #ffffff; } .skin-blue .main-header .navbar .nav > li > a:hover, .skin-blue .main-header .navbar .nav > li > a:active, .skin-blue .main-header .navbar .nav > li > a:focus, .skin-blue .main-header .navbar .nav .open > a, .skin-blue .main-header .navbar .nav .open > a:hover, .skin-blue .main-header .navbar .nav .open > a:focus, .skin-blue .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-blue .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-blue .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 
0, 0.1); } .skin-blue .main-header .navbar .sidebar-toggle { color: #fff; } .skin-blue .main-header .navbar .sidebar-toggle:hover { background-color: #367fa9; } @media (max-width: 767px) { .skin-blue .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-blue .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-blue .main-header .navbar .dropdown-menu li a:hover { background: #367fa9; } } .skin-blue .main-header .logo { background-color: #367fa9; color: #ffffff; border-bottom: 0 solid transparent; } .skin-blue .main-header .logo:hover { background-color: #357ca5; } .skin-blue .main-header li.user-header { background-color: #3c8dbc; } .skin-blue .content-header { background: transparent; } .skin-blue .wrapper, .skin-blue .main-sidebar, .skin-blue .left-side { background-color: #222d32; } .skin-blue .user-panel > .info, .skin-blue .user-panel > .info > a { color: #fff; } .skin-blue .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-blue .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-blue .sidebar-menu > li:hover > a, .skin-blue .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #3c8dbc; } .skin-blue .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-blue .sidebar a { color: #b8c7ce; } .skin-blue .sidebar a:hover { text-decoration: none; } .skin-blue .treeview-menu > li > a { color: #8aa4af; } .skin-blue .treeview-menu > li.active > a, .skin-blue .treeview-menu > li > a:hover { color: #ffffff; } .skin-blue .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-blue .sidebar-form input[type="text"], .skin-blue .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-blue .sidebar-form
input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-blue .sidebar-form input[type="text"]:focus, .skin-blue .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-blue .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-blue .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } .skin-blue.layout-top-nav .main-header > .logo { background-color: #3c8dbc; color: #ffffff; border-bottom: 0 solid transparent; } .skin-blue.layout-top-nav .main-header > .logo:hover { background-color: #3b8ab8; } /* * Skin: Blue Light * ---------------- */ .skin-blue-light .main-header .navbar { background-color: #3c8dbc; } .skin-blue-light .main-header .navbar .nav > li > a { color: #ffffff; } .skin-blue-light .main-header .navbar .nav > li > a:hover, .skin-blue-light .main-header .navbar .nav > li > a:active, .skin-blue-light .main-header .navbar .nav > li > a:focus, .skin-blue-light .main-header .navbar .nav .open > a, .skin-blue-light .main-header .navbar .nav .open > a:hover, .skin-blue-light .main-header .navbar .nav .open > a:focus, .skin-blue-light .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-blue-light .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-blue-light .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-blue-light .main-header .navbar .sidebar-toggle { color: #fff; } .skin-blue-light .main-header .navbar .sidebar-toggle:hover { background-color: #367fa9; } @media (max-width: 767px) { .skin-blue-light .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-blue-light .main-header .navbar .dropdown-menu li a { color:
#fff; } .skin-blue-light .main-header .navbar .dropdown-menu li a:hover { background: #367fa9; } } .skin-blue-light .main-header .logo { background-color: #3c8dbc; color: #ffffff; border-bottom: 0 solid transparent; } .skin-blue-light .main-header .logo:hover { background-color: #3b8ab8; } .skin-blue-light .main-header li.user-header { background-color: #3c8dbc; } .skin-blue-light .content-header { background: transparent; } .skin-blue-light .wrapper, .skin-blue-light .main-sidebar, .skin-blue-light .left-side { background-color: #f9fafc; } .skin-blue-light .content-wrapper, .skin-blue-light .main-footer { border-left: 1px solid #d2d6de; } .skin-blue-light .user-panel > .info, .skin-blue-light .user-panel > .info > a { color: #444444; } .skin-blue-light .sidebar-menu > li { -webkit-transition: border-left-color 0.3s ease; -o-transition: border-left-color 0.3s ease; transition: border-left-color 0.3s ease; } .skin-blue-light .sidebar-menu > li.header { color: #848484; background: #f9fafc; } .skin-blue-light .sidebar-menu > li > a { border-left: 3px solid transparent; font-weight: 600; } .skin-blue-light .sidebar-menu > li:hover > a, .skin-blue-light .sidebar-menu > li.active > a { color: #000000; background: #f4f4f5; } .skin-blue-light .sidebar-menu > li.active { border-left-color: #3c8dbc; } .skin-blue-light .sidebar-menu > li.active > a { font-weight: 600; } .skin-blue-light .sidebar-menu > li > .treeview-menu { background: #f4f4f5; } .skin-blue-light .sidebar a { color: #444444; } .skin-blue-light .sidebar a:hover { text-decoration: none; } .skin-blue-light .treeview-menu > li > a { color: #777777; } .skin-blue-light .treeview-menu > li.active > a, .skin-blue-light .treeview-menu > li > a:hover { color: #000000; } .skin-blue-light .treeview-menu > li.active > a { font-weight: 600; } .skin-blue-light .sidebar-form { border-radius: 3px; border: 1px solid #d2d6de; margin: 10px 10px; } .skin-blue-light .sidebar-form input[type="text"], .skin-blue-light .sidebar-form
.btn { box-shadow: none; background-color: #fff; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-blue-light .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-blue-light .sidebar-form input[type="text"]:focus, .skin-blue-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-blue-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-blue-light .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } @media (min-width: 768px) { .skin-blue-light.sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { border-left: 1px solid #d2d6de; } } .skin-blue-light .main-footer { border-top-color: #d2d6de; } /* NOTE(review): the two rules below close the skin-blue-light section; they previously targeted .skin-blue (upstream AdminLTE copy-paste — exact duplicates of the rules ending the skin-blue section above), so top-nav layouts of the light skin got no logo styling. Retargeted to .skin-blue-light. */ .skin-blue-light.layout-top-nav .main-header > .logo { background-color: #3c8dbc; color: #ffffff; border-bottom: 0 solid transparent; } .skin-blue-light.layout-top-nav .main-header > .logo:hover { background-color: #3b8ab8; } /* * Skin: Black * ----------- */ /* skin-black navbar */ .skin-black .main-header { -webkit-box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.05); box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.05); } .skin-black .main-header .navbar-toggle { color: #333; } .skin-black .main-header .navbar-brand { color: #333; border-right: 1px solid #eee; } .skin-black .main-header > .navbar { background-color: #ffffff; } .skin-black .main-header > .navbar .nav > li > a { color: #333333; } .skin-black .main-header > .navbar .nav > li > a:hover, .skin-black .main-header > .navbar .nav > li > a:active, .skin-black .main-header > .navbar .nav > li > a:focus, .skin-black .main-header > .navbar .nav .open > a, .skin-black .main-header > .navbar 
.nav .open > a:hover, .skin-black .main-header > .navbar .nav .open > a:focus, .skin-black .main-header > .navbar .nav > .active > a { background: #ffffff; color: #999999; } .skin-black .main-header > .navbar .sidebar-toggle { color: #333333; } .skin-black .main-header > .navbar .sidebar-toggle:hover { color: #999999; background: #ffffff; } .skin-black .main-header > .navbar > .sidebar-toggle { color: #333; border-right: 1px solid #eee; } .skin-black .main-header > .navbar .navbar-nav > li > a { border-right: 1px solid #eee; } .skin-black .main-header > .navbar .navbar-custom-menu .navbar-nav > li > a, .skin-black .main-header > .navbar .navbar-right > li > a { border-left: 1px solid #eee; border-right-width: 0; } .skin-black .main-header > .logo { background-color: #ffffff; color: #333333; border-bottom: 0 solid transparent; border-right: 1px solid #eee; } .skin-black .main-header > .logo:hover { background-color: #fcfcfc; } @media (max-width: 767px) { .skin-black .main-header > .logo { background-color: #222222; color: #ffffff; border-bottom: 0 solid transparent; border-right: none; } .skin-black .main-header > .logo:hover { background-color: #1f1f1f; } } .skin-black .main-header li.user-header { background-color: #222; } .skin-black .content-header { background: transparent; box-shadow: none; } .skin-black .wrapper, .skin-black .main-sidebar, .skin-black .left-side { background-color: #222d32; } .skin-black .user-panel > .info, .skin-black .user-panel > .info > a { color: #fff; } .skin-black .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-black .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-black .sidebar-menu > li:hover > a, .skin-black .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #ffffff; } .skin-black .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-black .sidebar a { color: #b8c7ce; } .skin-black .sidebar a:hover { text-decoration: none; 
} .skin-black .treeview-menu > li > a { color: #8aa4af; } .skin-black .treeview-menu > li.active > a, .skin-black .treeview-menu > li > a:hover { color: #ffffff; } .skin-black .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-black .sidebar-form input[type="text"], .skin-black .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-black .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-black .sidebar-form input[type="text"]:focus, .skin-black .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-black .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-black .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } .skin-black .pace .pace-progress { background: #222; } .skin-black .pace .pace-activity { border-top-color: #222; border-left-color: #222; } /* * Skin: Black * ----------- */ /* skin-black navbar */ .skin-black-light .main-header { -webkit-box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.05); box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.05); } .skin-black-light .main-header .navbar-toggle { color: #333; } .skin-black-light .main-header .navbar-brand { color: #333; border-right: 1px solid #eee; } .skin-black-light .main-header > .navbar { background-color: #ffffff; } .skin-black-light .main-header > .navbar .nav > li > a { color: #333333; } .skin-black-light .main-header > .navbar .nav > li > a:hover, .skin-black-light .main-header > .navbar .nav > li > a:active, .skin-black-light .main-header > .navbar .nav > li > a:focus, 
.skin-black-light .main-header > .navbar .nav .open > a, .skin-black-light .main-header > .navbar .nav .open > a:hover, .skin-black-light .main-header > .navbar .nav .open > a:focus, .skin-black-light .main-header > .navbar .nav > .active > a { background: #ffffff; color: #999999; } .skin-black-light .main-header > .navbar .sidebar-toggle { color: #333333; } .skin-black-light .main-header > .navbar .sidebar-toggle:hover { color: #999999; background: #ffffff; } .skin-black-light .main-header > .navbar > .sidebar-toggle { color: #333; border-right: 1px solid #eee; } .skin-black-light .main-header > .navbar .navbar-nav > li > a { border-right: 1px solid #eee; } .skin-black-light .main-header > .navbar .navbar-custom-menu .navbar-nav > li > a, .skin-black-light .main-header > .navbar .navbar-right > li > a { border-left: 1px solid #eee; border-right-width: 0; } .skin-black-light .main-header > .logo { background-color: #ffffff; color: #333333; border-bottom: 0 solid transparent; border-right: 1px solid #eee; } .skin-black-light .main-header > .logo:hover { background-color: #fcfcfc; } @media (max-width: 767px) { .skin-black-light .main-header > .logo { background-color: #222222; color: #ffffff; border-bottom: 0 solid transparent; border-right: none; } .skin-black-light .main-header > .logo:hover { background-color: #1f1f1f; } } .skin-black-light .main-header li.user-header { background-color: #222; } .skin-black-light .content-header { background: transparent; box-shadow: none; } .skin-black-light .wrapper, .skin-black-light .main-sidebar, .skin-black-light .left-side { background-color: #f9fafc; } .skin-black-light .content-wrapper, .skin-black-light .main-footer { border-left: 1px solid #d2d6de; } .skin-black-light .user-panel > .info, .skin-black-light .user-panel > .info > a { color: #444444; } .skin-black-light .sidebar-menu > li { -webkit-transition: border-left-color 0.3s ease; -o-transition: border-left-color 0.3s ease; transition: border-left-color 0.3s ease; 
} .skin-black-light .sidebar-menu > li.header { color: #848484; background: #f9fafc; } .skin-black-light .sidebar-menu > li > a { border-left: 3px solid transparent; font-weight: 600; } .skin-black-light .sidebar-menu > li:hover > a, .skin-black-light .sidebar-menu > li.active > a { color: #000000; background: #f4f4f5; } .skin-black-light .sidebar-menu > li.active { border-left-color: #ffffff; } .skin-black-light .sidebar-menu > li.active > a { font-weight: 600; } .skin-black-light .sidebar-menu > li > .treeview-menu { background: #f4f4f5; } .skin-black-light .sidebar a { color: #444444; } .skin-black-light .sidebar a:hover { text-decoration: none; } .skin-black-light .treeview-menu > li > a { color: #777777; } .skin-black-light .treeview-menu > li.active > a, .skin-black-light .treeview-menu > li > a:hover { color: #000000; } .skin-black-light .treeview-menu > li.active > a { font-weight: 600; } .skin-black-light .sidebar-form { border-radius: 3px; border: 1px solid #d2d6de; margin: 10px 10px; } .skin-black-light .sidebar-form input[type="text"], .skin-black-light .sidebar-form .btn { box-shadow: none; background-color: #fff; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-black-light .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-black-light .sidebar-form input[type="text"]:focus, .skin-black-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-black-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-black-light .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } @media (min-width: 768px) { 
.skin-black-light.sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { border-left: 1px solid #d2d6de; } } /* * Skin: Green * ----------- */ .skin-green .main-header .navbar { background-color: #00a65a; } .skin-green .main-header .navbar .nav > li > a { color: #ffffff; } .skin-green .main-header .navbar .nav > li > a:hover, .skin-green .main-header .navbar .nav > li > a:active, .skin-green .main-header .navbar .nav > li > a:focus, .skin-green .main-header .navbar .nav .open > a, .skin-green .main-header .navbar .nav .open > a:hover, .skin-green .main-header .navbar .nav .open > a:focus, .skin-green .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-green .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-green .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-green .main-header .navbar .sidebar-toggle { color: #fff; } .skin-green .main-header .navbar .sidebar-toggle:hover { background-color: #008d4c; } @media (max-width: 767px) { .skin-green .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-green .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-green .main-header .navbar .dropdown-menu li a:hover { background: #008d4c; } } .skin-green .main-header .logo { background-color: #008d4c; color: #ffffff; border-bottom: 0 solid transparent; } .skin-green .main-header .logo:hover { background-color: #008749; } .skin-green .main-header li.user-header { background-color: #00a65a; } .skin-green .content-header { background: transparent; } .skin-green .wrapper, .skin-green .main-sidebar, .skin-green .left-side { background-color: #222d32; } .skin-green .user-panel > .info, .skin-green .user-panel > .info > a { color: #fff; } .skin-green .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-green .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-green 
.sidebar-menu > li:hover > a, .skin-green .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #00a65a; } .skin-green .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-green .sidebar a { color: #b8c7ce; } .skin-green .sidebar a:hover { text-decoration: none; } .skin-green .treeview-menu > li > a { color: #8aa4af; } .skin-green .treeview-menu > li.active > a, .skin-green .treeview-menu > li > a:hover { color: #ffffff; } .skin-green .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-green .sidebar-form input[type="text"], .skin-green .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-green .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-green .sidebar-form input[type="text"]:focus, .skin-green .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-green .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-green .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } /* * Skin: Green Light — light-sidebar variant of skin-green (all following rules are .skin-green-light) * ----------- */ .skin-green-light .main-header .navbar { background-color: #00a65a; } .skin-green-light .main-header .navbar .nav > li > a { color: #ffffff; } .skin-green-light .main-header .navbar .nav > li > a:hover, .skin-green-light .main-header .navbar .nav > li > a:active, .skin-green-light .main-header .navbar .nav > li > a:focus, .skin-green-light .main-header .navbar .nav .open > a, .skin-green-light .main-header .navbar .nav .open > a:hover, .skin-green-light .main-header 
.navbar .nav .open > a:focus, .skin-green-light .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-green-light .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-green-light .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-green-light .main-header .navbar .sidebar-toggle { color: #fff; } .skin-green-light .main-header .navbar .sidebar-toggle:hover { background-color: #008d4c; } @media (max-width: 767px) { .skin-green-light .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-green-light .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-green-light .main-header .navbar .dropdown-menu li a:hover { background: #008d4c; } } .skin-green-light .main-header .logo { background-color: #00a65a; color: #ffffff; border-bottom: 0 solid transparent; } .skin-green-light .main-header .logo:hover { background-color: #00a157; } .skin-green-light .main-header li.user-header { background-color: #00a65a; } .skin-green-light .content-header { background: transparent; } .skin-green-light .wrapper, .skin-green-light .main-sidebar, .skin-green-light .left-side { background-color: #f9fafc; } .skin-green-light .content-wrapper, .skin-green-light .main-footer { border-left: 1px solid #d2d6de; } .skin-green-light .user-panel > .info, .skin-green-light .user-panel > .info > a { color: #444444; } .skin-green-light .sidebar-menu > li { -webkit-transition: border-left-color 0.3s ease; -o-transition: border-left-color 0.3s ease; transition: border-left-color 0.3s ease; } .skin-green-light .sidebar-menu > li.header { color: #848484; background: #f9fafc; } .skin-green-light .sidebar-menu > li > a { border-left: 3px solid transparent; font-weight: 600; } .skin-green-light .sidebar-menu > li:hover > a, .skin-green-light .sidebar-menu > li.active > a { color: #000000; background: #f4f4f5; } .skin-green-light .sidebar-menu > li.active 
{ border-left-color: #00a65a; } .skin-green-light .sidebar-menu > li.active > a { font-weight: 600; } .skin-green-light .sidebar-menu > li > .treeview-menu { background: #f4f4f5; } .skin-green-light .sidebar a { color: #444444; } .skin-green-light .sidebar a:hover { text-decoration: none; } .skin-green-light .treeview-menu > li > a { color: #777777; } .skin-green-light .treeview-menu > li.active > a, .skin-green-light .treeview-menu > li > a:hover { color: #000000; } .skin-green-light .treeview-menu > li.active > a { font-weight: 600; } .skin-green-light .sidebar-form { border-radius: 3px; border: 1px solid #d2d6de; margin: 10px 10px; } .skin-green-light .sidebar-form input[type="text"], .skin-green-light .sidebar-form .btn { box-shadow: none; background-color: #fff; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-green-light .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-green-light .sidebar-form input[type="text"]:focus, .skin-green-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-green-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-green-light .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } @media (min-width: 768px) { .skin-green-light.sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { border-left: 1px solid #d2d6de; } } /* * Skin: Red * --------- */ .skin-red .main-header .navbar { background-color: #dd4b39; } .skin-red .main-header .navbar .nav > li > a { color: #ffffff; } .skin-red .main-header .navbar .nav > li > a:hover, .skin-red .main-header .navbar .nav > li > 
a:active, .skin-red .main-header .navbar .nav > li > a:focus, .skin-red .main-header .navbar .nav .open > a, .skin-red .main-header .navbar .nav .open > a:hover, .skin-red .main-header .navbar .nav .open > a:focus, .skin-red .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-red .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-red .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-red .main-header .navbar .sidebar-toggle { color: #fff; } .skin-red .main-header .navbar .sidebar-toggle:hover { background-color: #d73925; } @media (max-width: 767px) { .skin-red .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-red .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-red .main-header .navbar .dropdown-menu li a:hover { background: #d73925; } } .skin-red .main-header .logo { background-color: #d73925; color: #ffffff; border-bottom: 0 solid transparent; } .skin-red .main-header .logo:hover { background-color: #d33724; } .skin-red .main-header li.user-header { background-color: #dd4b39; } .skin-red .content-header { background: transparent; } .skin-red .wrapper, .skin-red .main-sidebar, .skin-red .left-side { background-color: #222d32; } .skin-red .user-panel > .info, .skin-red .user-panel > .info > a { color: #fff; } .skin-red .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-red .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-red .sidebar-menu > li:hover > a, .skin-red .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #dd4b39; } .skin-red .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-red .sidebar a { color: #b8c7ce; } .skin-red .sidebar a:hover { text-decoration: none; } .skin-red .treeview-menu > li > a { color: #8aa4af; } .skin-red .treeview-menu > li.active > a, .skin-red 
.treeview-menu > li > a:hover { color: #ffffff; } .skin-red .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-red .sidebar-form input[type="text"], .skin-red .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-red .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-red .sidebar-form input[type="text"]:focus, .skin-red .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-red .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-red .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } /* * Skin: Red * --------- */ .skin-red-light .main-header .navbar { background-color: #dd4b39; } .skin-red-light .main-header .navbar .nav > li > a { color: #ffffff; } .skin-red-light .main-header .navbar .nav > li > a:hover, .skin-red-light .main-header .navbar .nav > li > a:active, .skin-red-light .main-header .navbar .nav > li > a:focus, .skin-red-light .main-header .navbar .nav .open > a, .skin-red-light .main-header .navbar .nav .open > a:hover, .skin-red-light .main-header .navbar .nav .open > a:focus, .skin-red-light .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-red-light .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-red-light .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-red-light .main-header .navbar .sidebar-toggle { color: #fff; } .skin-red-light .main-header .navbar .sidebar-toggle:hover { background-color: 
#d73925; } @media (max-width: 767px) { .skin-red-light .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-red-light .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-red-light .main-header .navbar .dropdown-menu li a:hover { background: #d73925; } } .skin-red-light .main-header .logo { background-color: #dd4b39; color: #ffffff; border-bottom: 0 solid transparent; } .skin-red-light .main-header .logo:hover { background-color: #dc4735; } .skin-red-light .main-header li.user-header { background-color: #dd4b39; } .skin-red-light .content-header { background: transparent; } .skin-red-light .wrapper, .skin-red-light .main-sidebar, .skin-red-light .left-side { background-color: #f9fafc; } .skin-red-light .content-wrapper, .skin-red-light .main-footer { border-left: 1px solid #d2d6de; } .skin-red-light .user-panel > .info, .skin-red-light .user-panel > .info > a { color: #444444; } .skin-red-light .sidebar-menu > li { -webkit-transition: border-left-color 0.3s ease; -o-transition: border-left-color 0.3s ease; transition: border-left-color 0.3s ease; } .skin-red-light .sidebar-menu > li.header { color: #848484; background: #f9fafc; } .skin-red-light .sidebar-menu > li > a { border-left: 3px solid transparent; font-weight: 600; } .skin-red-light .sidebar-menu > li:hover > a, .skin-red-light .sidebar-menu > li.active > a { color: #000000; background: #f4f4f5; } .skin-red-light .sidebar-menu > li.active { border-left-color: #dd4b39; } .skin-red-light .sidebar-menu > li.active > a { font-weight: 600; } .skin-red-light .sidebar-menu > li > .treeview-menu { background: #f4f4f5; } .skin-red-light .sidebar a { color: #444444; } .skin-red-light .sidebar a:hover { text-decoration: none; } .skin-red-light .treeview-menu > li > a { color: #777777; } .skin-red-light .treeview-menu > li.active > a, .skin-red-light .treeview-menu > li > a:hover { color: #000000; } .skin-red-light .treeview-menu > li.active > a { font-weight: 
600; } .skin-red-light .sidebar-form { border-radius: 3px; border: 1px solid #d2d6de; margin: 10px 10px; } .skin-red-light .sidebar-form input[type="text"], .skin-red-light .sidebar-form .btn { box-shadow: none; background-color: #fff; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-red-light .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-red-light .sidebar-form input[type="text"]:focus, .skin-red-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-red-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-red-light .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } @media (min-width: 768px) { .skin-red-light.sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { border-left: 1px solid #d2d6de; } } /* * Skin: Yellow * ------------ */ .skin-yellow .main-header .navbar { background-color: #f39c12; } .skin-yellow .main-header .navbar .nav > li > a { color: #ffffff; } .skin-yellow .main-header .navbar .nav > li > a:hover, .skin-yellow .main-header .navbar .nav > li > a:active, .skin-yellow .main-header .navbar .nav > li > a:focus, .skin-yellow .main-header .navbar .nav .open > a, .skin-yellow .main-header .navbar .nav .open > a:hover, .skin-yellow .main-header .navbar .nav .open > a:focus, .skin-yellow .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-yellow .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-yellow .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-yellow .main-header .navbar 
.sidebar-toggle { color: #fff; } .skin-yellow .main-header .navbar .sidebar-toggle:hover { background-color: #e08e0b; } @media (max-width: 767px) { .skin-yellow .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-yellow .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-yellow .main-header .navbar .dropdown-menu li a:hover { background: #e08e0b; } } .skin-yellow .main-header .logo { background-color: #e08e0b; color: #ffffff; border-bottom: 0 solid transparent; } .skin-yellow .main-header .logo:hover { background-color: #db8b0b; } .skin-yellow .main-header li.user-header { background-color: #f39c12; } .skin-yellow .content-header { background: transparent; } .skin-yellow .wrapper, .skin-yellow .main-sidebar, .skin-yellow .left-side { background-color: #222d32; } .skin-yellow .user-panel > .info, .skin-yellow .user-panel > .info > a { color: #fff; } .skin-yellow .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-yellow .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-yellow .sidebar-menu > li:hover > a, .skin-yellow .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #f39c12; } .skin-yellow .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-yellow .sidebar a { color: #b8c7ce; } .skin-yellow .sidebar a:hover { text-decoration: none; } .skin-yellow .treeview-menu > li > a { color: #8aa4af; } .skin-yellow .treeview-menu > li.active > a, .skin-yellow .treeview-menu > li > a:hover { color: #ffffff; } .skin-yellow .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-yellow .sidebar-form input[type="text"], .skin-yellow .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-yellow .sidebar-form 
input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-yellow .sidebar-form input[type="text"]:focus, .skin-yellow .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-yellow .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-yellow .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } /* * Skin: Yellow * ------------ */ .skin-yellow-light .main-header .navbar { background-color: #f39c12; } .skin-yellow-light .main-header .navbar .nav > li > a { color: #ffffff; } .skin-yellow-light .main-header .navbar .nav > li > a:hover, .skin-yellow-light .main-header .navbar .nav > li > a:active, .skin-yellow-light .main-header .navbar .nav > li > a:focus, .skin-yellow-light .main-header .navbar .nav .open > a, .skin-yellow-light .main-header .navbar .nav .open > a:hover, .skin-yellow-light .main-header .navbar .nav .open > a:focus, .skin-yellow-light .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-yellow-light .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-yellow-light .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-yellow-light .main-header .navbar .sidebar-toggle { color: #fff; } .skin-yellow-light .main-header .navbar .sidebar-toggle:hover { background-color: #e08e0b; } @media (max-width: 767px) { .skin-yellow-light .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-yellow-light .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-yellow-light .main-header .navbar .dropdown-menu li a:hover { background: #e08e0b; } } .skin-yellow-light .main-header .logo { background-color: #f39c12; color: 
#ffffff; border-bottom: 0 solid transparent; } .skin-yellow-light .main-header .logo:hover { background-color: #f39a0d; } .skin-yellow-light .main-header li.user-header { background-color: #f39c12; } .skin-yellow-light .content-header { background: transparent; } .skin-yellow-light .wrapper, .skin-yellow-light .main-sidebar, .skin-yellow-light .left-side { background-color: #f9fafc; } .skin-yellow-light .content-wrapper, .skin-yellow-light .main-footer { border-left: 1px solid #d2d6de; } .skin-yellow-light .user-panel > .info, .skin-yellow-light .user-panel > .info > a { color: #444444; } .skin-yellow-light .sidebar-menu > li { -webkit-transition: border-left-color 0.3s ease; -o-transition: border-left-color 0.3s ease; transition: border-left-color 0.3s ease; } .skin-yellow-light .sidebar-menu > li.header { color: #848484; background: #f9fafc; } .skin-yellow-light .sidebar-menu > li > a { border-left: 3px solid transparent; font-weight: 600; } .skin-yellow-light .sidebar-menu > li:hover > a, .skin-yellow-light .sidebar-menu > li.active > a { color: #000000; background: #f4f4f5; } .skin-yellow-light .sidebar-menu > li.active { border-left-color: #f39c12; } .skin-yellow-light .sidebar-menu > li.active > a { font-weight: 600; } .skin-yellow-light .sidebar-menu > li > .treeview-menu { background: #f4f4f5; } .skin-yellow-light .sidebar a { color: #444444; } .skin-yellow-light .sidebar a:hover { text-decoration: none; } .skin-yellow-light .treeview-menu > li > a { color: #777777; } .skin-yellow-light .treeview-menu > li.active > a, .skin-yellow-light .treeview-menu > li > a:hover { color: #000000; } .skin-yellow-light .treeview-menu > li.active > a { font-weight: 600; } .skin-yellow-light .sidebar-form { border-radius: 3px; border: 1px solid #d2d6de; margin: 10px 10px; } .skin-yellow-light .sidebar-form input[type="text"], .skin-yellow-light .sidebar-form .btn { box-shadow: none; background-color: #fff; border: 1px solid transparent; height: 35px; -webkit-transition: all 
0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-yellow-light .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-yellow-light .sidebar-form input[type="text"]:focus, .skin-yellow-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-yellow-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-yellow-light .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } @media (min-width: 768px) { .skin-yellow-light.sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { border-left: 1px solid #d2d6de; } } /* * Skin: Purple * ------------ */ .skin-purple .main-header .navbar { background-color: #605ca8; } .skin-purple .main-header .navbar .nav > li > a { color: #ffffff; } .skin-purple .main-header .navbar .nav > li > a:hover, .skin-purple .main-header .navbar .nav > li > a:active, .skin-purple .main-header .navbar .nav > li > a:focus, .skin-purple .main-header .navbar .nav .open > a, .skin-purple .main-header .navbar .nav .open > a:hover, .skin-purple .main-header .navbar .nav .open > a:focus, .skin-purple .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-purple .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-purple .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-purple .main-header .navbar .sidebar-toggle { color: #fff; } .skin-purple .main-header .navbar .sidebar-toggle:hover { background-color: #555299; } @media (max-width: 767px) { .skin-purple .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-purple .main-header 
.navbar .dropdown-menu li a { color: #fff; } .skin-purple .main-header .navbar .dropdown-menu li a:hover { background: #555299; } } .skin-purple .main-header .logo { background-color: #555299; color: #ffffff; border-bottom: 0 solid transparent; } .skin-purple .main-header .logo:hover { background-color: #545096; } .skin-purple .main-header li.user-header { background-color: #605ca8; } .skin-purple .content-header { background: transparent; } .skin-purple .wrapper, .skin-purple .main-sidebar, .skin-purple .left-side { background-color: #222d32; } .skin-purple .user-panel > .info, .skin-purple .user-panel > .info > a { color: #fff; } .skin-purple .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-purple .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-purple .sidebar-menu > li:hover > a, .skin-purple .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #605ca8; } .skin-purple .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-purple .sidebar a { color: #b8c7ce; } .skin-purple .sidebar a:hover { text-decoration: none; } .skin-purple .treeview-menu > li > a { color: #8aa4af; } .skin-purple .treeview-menu > li.active > a, .skin-purple .treeview-menu > li > a:hover { color: #ffffff; } .skin-purple .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-purple .sidebar-form input[type="text"], .skin-purple .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-purple .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-purple .sidebar-form input[type="text"]:focus, .skin-purple .sidebar-form input[type="text"]:focus + .input-group-btn 
.btn { background-color: #fff; color: #666; } .skin-purple .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-purple .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } /* * Skin: Purple * ------------ */ .skin-purple-light .main-header .navbar { background-color: #605ca8; } .skin-purple-light .main-header .navbar .nav > li > a { color: #ffffff; } .skin-purple-light .main-header .navbar .nav > li > a:hover, .skin-purple-light .main-header .navbar .nav > li > a:active, .skin-purple-light .main-header .navbar .nav > li > a:focus, .skin-purple-light .main-header .navbar .nav .open > a, .skin-purple-light .main-header .navbar .nav .open > a:hover, .skin-purple-light .main-header .navbar .nav .open > a:focus, .skin-purple-light .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-purple-light .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-purple-light .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-purple-light .main-header .navbar .sidebar-toggle { color: #fff; } .skin-purple-light .main-header .navbar .sidebar-toggle:hover { background-color: #555299; } @media (max-width: 767px) { .skin-purple-light .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-purple-light .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-purple-light .main-header .navbar .dropdown-menu li a:hover { background: #555299; } } .skin-purple-light .main-header .logo { background-color: #605ca8; color: #ffffff; border-bottom: 0 solid transparent; } .skin-purple-light .main-header .logo:hover { background-color: #5d59a6; } .skin-purple-light .main-header li.user-header { background-color: #605ca8; } .skin-purple-light .content-header { background: transparent; } 
.skin-purple-light .wrapper, .skin-purple-light .main-sidebar, .skin-purple-light .left-side { background-color: #f9fafc; } .skin-purple-light .content-wrapper, .skin-purple-light .main-footer { border-left: 1px solid #d2d6de; } .skin-purple-light .user-panel > .info, .skin-purple-light .user-panel > .info > a { color: #444444; } .skin-purple-light .sidebar-menu > li { -webkit-transition: border-left-color 0.3s ease; -o-transition: border-left-color 0.3s ease; transition: border-left-color 0.3s ease; } .skin-purple-light .sidebar-menu > li.header { color: #848484; background: #f9fafc; } .skin-purple-light .sidebar-menu > li > a { border-left: 3px solid transparent; font-weight: 600; } .skin-purple-light .sidebar-menu > li:hover > a, .skin-purple-light .sidebar-menu > li.active > a { color: #000000; background: #f4f4f5; } .skin-purple-light .sidebar-menu > li.active { border-left-color: #605ca8; } .skin-purple-light .sidebar-menu > li.active > a { font-weight: 600; } .skin-purple-light .sidebar-menu > li > .treeview-menu { background: #f4f4f5; } .skin-purple-light .sidebar a { color: #444444; } .skin-purple-light .sidebar a:hover { text-decoration: none; } .skin-purple-light .treeview-menu > li > a { color: #777777; } .skin-purple-light .treeview-menu > li.active > a, .skin-purple-light .treeview-menu > li > a:hover { color: #000000; } .skin-purple-light .treeview-menu > li.active > a { font-weight: 600; } .skin-purple-light .sidebar-form { border-radius: 3px; border: 1px solid #d2d6de; margin: 10px 10px; } .skin-purple-light .sidebar-form input[type="text"], .skin-purple-light .sidebar-form .btn { box-shadow: none; background-color: #fff; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-purple-light .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; 
border-bottom-left-radius: 2px; } .skin-purple-light .sidebar-form input[type="text"]:focus, .skin-purple-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-purple-light .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-purple-light .sidebar-form .btn { color: #999; border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } @media (min-width: 768px) { .skin-purple-light.sidebar-mini.sidebar-collapse .sidebar-menu > li > .treeview-menu { border-left: 1px solid #d2d6de; } } ================================================ FILE: web/static/dist/css/skins/skin-blue.css ================================================ /* * Skin: Blue * ---------- */ .skin-blue .main-header .navbar { background-color: #3c8dbc; } .skin-blue .main-header .navbar .nav > li > a { color: #ffffff; } .skin-blue .main-header .navbar .nav > li > a:hover, .skin-blue .main-header .navbar .nav > li > a:active, .skin-blue .main-header .navbar .nav > li > a:focus, .skin-blue .main-header .navbar .nav .open > a, .skin-blue .main-header .navbar .nav .open > a:hover, .skin-blue .main-header .navbar .nav .open > a:focus, .skin-blue .main-header .navbar .nav > .active > a { background: rgba(0, 0, 0, 0.1); color: #f6f6f6; } .skin-blue .main-header .navbar .sidebar-toggle { color: #ffffff; } .skin-blue .main-header .navbar .sidebar-toggle:hover { color: #f6f6f6; background: rgba(0, 0, 0, 0.1); } .skin-blue .main-header .navbar .sidebar-toggle { color: #fff; } .skin-blue .main-header .navbar .sidebar-toggle:hover { background-color: #367fa9; } @media (max-width: 767px) { .skin-blue .main-header .navbar .dropdown-menu li.divider { background-color: rgba(255, 255, 255, 0.1); } .skin-blue .main-header .navbar .dropdown-menu li a { color: #fff; } .skin-blue .main-header .navbar .dropdown-menu li a:hover { background: #367fa9; } } .skin-blue 
.main-header .logo { background-color: #367fa9; color: #ffffff; border-bottom: 0 solid transparent; } .skin-blue .main-header .logo:hover { background-color: #357ca5; } .skin-blue .main-header li.user-header { background-color: #3c8dbc; } .skin-blue .content-header { background: transparent; } .skin-blue .wrapper, .skin-blue .main-sidebar, .skin-blue .left-side { background-color: #222d32; } .skin-blue .user-panel > .info, .skin-blue .user-panel > .info > a { color: #fff; } .skin-blue .sidebar-menu > li.header { color: #4b646f; background: #1a2226; } .skin-blue .sidebar-menu > li > a { border-left: 3px solid transparent; } .skin-blue .sidebar-menu > li:hover > a, .skin-blue .sidebar-menu > li.active > a { color: #ffffff; background: #1e282c; border-left-color: #3c8dbc; } .skin-blue .sidebar-menu > li > .treeview-menu { margin: 0 1px; background: #2c3b41; } .skin-blue .sidebar a { color: #b8c7ce; } .skin-blue .sidebar a:hover { text-decoration: none; } .skin-blue .treeview-menu > li > a { color: #8aa4af; } .skin-blue .treeview-menu > li.active > a, .skin-blue .treeview-menu > li > a:hover { color: #ffffff; } .skin-blue .sidebar-form { border-radius: 3px; border: 1px solid #374850; margin: 10px 10px; } .skin-blue .sidebar-form input[type="text"], .skin-blue .sidebar-form .btn { box-shadow: none; background-color: #374850; border: 1px solid transparent; height: 35px; -webkit-transition: all 0.3s ease-in-out; -o-transition: all 0.3s ease-in-out; transition: all 0.3s ease-in-out; } .skin-blue .sidebar-form input[type="text"] { color: #666; border-top-left-radius: 2px; border-top-right-radius: 0; border-bottom-right-radius: 0; border-bottom-left-radius: 2px; } .skin-blue .sidebar-form input[type="text"]:focus, .skin-blue .sidebar-form input[type="text"]:focus + .input-group-btn .btn { background-color: #fff; color: #666; } .skin-blue .sidebar-form input[type="text"]:focus + .input-group-btn .btn { border-left-color: #fff; } .skin-blue .sidebar-form .btn { color: #999; 
border-top-left-radius: 0; border-top-right-radius: 2px; border-bottom-right-radius: 2px; border-bottom-left-radius: 0; } .skin-blue.layout-top-nav .main-header > .logo { background-color: #3c8dbc; color: #ffffff; border-bottom: 0 solid transparent; } .skin-blue.layout-top-nav .main-header > .logo:hover { background-color: #3b8ab8; } ================================================ FILE: web/static/dist/js/app.js ================================================ /*! AdminLTE app.js * ================ * Main JS application file for AdminLTE v2. This file * should be included in all pages. It controls some layout * options and implements exclusive AdminLTE plugins. * * @Author Almsaeed Studio * @Support * @Email * @version 2.3.2 * @license MIT */ //Make sure jQuery has been loaded before app.js if (typeof jQuery === "undefined") { throw new Error("AdminLTE requires jQuery"); } /* AdminLTE * * @type Object * @description $.AdminLTE is the main object for the template's app. * It's used for implementing functions and options related * to the template. Keeping everything wrapped in an object * prevents conflict with other plugins and is a better * way to organize our code. */ $.AdminLTE = {}; /* -------------------- * - AdminLTE Options - * -------------------- * Modify these options to suit your implementation */ $.AdminLTE.options = { //Add slimscroll to navbar menus //This requires you to load the slimscroll plugin //in every page before app.js navbarMenuSlimscroll: true, navbarMenuSlimscrollWidth: "3px", //The width of the scroll bar navbarMenuHeight: "200px", //The height of the inner menu //General animation speed for JS animated elements such as box collapse/expand and //sidebar treeview slide up/down. 
This options accepts an integer as milliseconds, //'fast', 'normal', or 'slow' animationSpeed: 500, //Sidebar push menu toggle button selector sidebarToggleSelector: "[data-toggle='offcanvas']", //Activate sidebar push menu sidebarPushMenu: true, //Activate sidebar slimscroll if the fixed layout is set (requires SlimScroll Plugin) sidebarSlimScroll: true, //Enable sidebar expand on hover effect for sidebar mini //This option is forced to true if both the fixed layout and sidebar mini //are used together sidebarExpandOnHover: false, //BoxRefresh Plugin enableBoxRefresh: true, //Bootstrap.js tooltip enableBSToppltip: true, BSTooltipSelector: "[data-toggle='tooltip']", //Enable Fast Click. Fastclick.js creates a more //native touch experience with touch devices. If you //choose to enable the plugin, make sure you load the script //before AdminLTE's app.js enableFastclick: true, //Control Sidebar Options enableControlSidebar: true, controlSidebarOptions: { //Which button should trigger the open/close event toggleBtnSelector: "[data-toggle='control-sidebar']", //The sidebar selector selector: ".control-sidebar", //Enable slide over content slide: true }, //Box Widget Plugin. 
Enable this plugin //to allow boxes to be collapsed and/or removed enableBoxWidget: true, //Box Widget plugin options boxWidgetOptions: { boxWidgetIcons: { //Collapse icon collapse: 'fa-minus', //Open icon open: 'fa-plus', //Remove icon remove: 'fa-times' }, boxWidgetSelectors: { //Remove button selector remove: '[data-widget="remove"]', //Collapse button selector collapse: '[data-widget="collapse"]' } }, //Direct Chat plugin options directChat: { //Enable direct chat by default enable: true, //The button to open and close the chat contacts pane contactToggleSelector: '[data-widget="chat-pane-toggle"]' }, //Define the set of colors to use globally around the website colors: { lightBlue: "#3c8dbc", red: "#f56954", green: "#00a65a", aqua: "#00c0ef", yellow: "#f39c12", blue: "#0073b7", navy: "#001F3F", teal: "#39CCCC", olive: "#3D9970", lime: "#01FF70", orange: "#FF851B", fuchsia: "#F012BE", purple: "#8E24AA", maroon: "#D81B60", black: "#222222", gray: "#d2d6de" }, //The standard screen sizes that bootstrap uses. //If you change these in the variables.less file, change //them here too. screenSizes: { xs: 480, sm: 768, md: 992, lg: 1200 } }; /* ------------------ * - Implementation - * ------------------ * The next block of code implements AdminLTE's * functions and plugins as specified by the * options above. 
*/ $(function () { "use strict"; //Fix for IE page transitions $("body").removeClass("hold-transition"); //Extend options if external options exist if (typeof AdminLTEOptions !== "undefined") { $.extend(true, $.AdminLTE.options, AdminLTEOptions); } //Easy access to options var o = $.AdminLTE.options; //Set up the object _init(); //Activate the layout maker $.AdminLTE.layout.activate(); //Enable sidebar tree view controls $.AdminLTE.tree('.sidebar'); //Enable control sidebar if (o.enableControlSidebar) { $.AdminLTE.controlSidebar.activate(); } //Add slimscroll to navbar dropdown if (o.navbarMenuSlimscroll && typeof $.fn.slimscroll != 'undefined') { $(".navbar .menu").slimscroll({ height: o.navbarMenuHeight, alwaysVisible: false, size: o.navbarMenuSlimscrollWidth }).css("width", "100%"); } //Activate sidebar push menu if (o.sidebarPushMenu) { $.AdminLTE.pushMenu.activate(o.sidebarToggleSelector); } //Activate Bootstrap tooltip if (o.enableBSToppltip) { $('body').tooltip({ selector: o.BSTooltipSelector }); } //Activate box widget if (o.enableBoxWidget) { $.AdminLTE.boxWidget.activate(); } //Activate fast click if (o.enableFastclick && typeof FastClick != 'undefined') { FastClick.attach(document.body); } //Activate direct chat widget if (o.directChat.enable) { $(document).on('click', o.directChat.contactToggleSelector, function () { var box = $(this).parents('.direct-chat').first(); box.toggleClass('direct-chat-contacts-open'); }); } /* * INITIALIZE BUTTON TOGGLE * ------------------------ */ $('.btn-group[data-toggle="btn-toggle"]').each(function () { var group = $(this); $(this).find(".btn").on('click', function (e) { group.find(".btn.active").removeClass("active"); $(this).addClass("active"); e.preventDefault(); }); }); }); /* ---------------------------------- * - Initialize the AdminLTE Object - * ---------------------------------- * All AdminLTE functions are implemented below. 
*/ function _init() { 'use strict'; /* Layout * ====== * Fixes the layout height in case min-height fails. * * @type Object * @usage $.AdminLTE.layout.activate() * $.AdminLTE.layout.fix() * $.AdminLTE.layout.fixSidebar() */ $.AdminLTE.layout = { activate: function () { var _this = this; _this.fix(); _this.fixSidebar(); $(window, ".wrapper").resize(function () { _this.fix(); _this.fixSidebar(); }); }, fix: function () { //Get window height and the wrapper height var neg = $('.main-header').outerHeight() + $('.main-footer').outerHeight(); var window_height = $(window).height(); var sidebar_height = $(".sidebar").height(); //Set the min-height of the content and sidebar based on the //the height of the document. if ($("body").hasClass("fixed")) { $(".content-wrapper, .right-side").css('min-height', window_height - $('.main-footer').outerHeight()); } else { var postSetWidth; if (window_height >= sidebar_height) { $(".content-wrapper, .right-side").css('min-height', window_height - neg); postSetWidth = window_height - neg; } else { $(".content-wrapper, .right-side").css('min-height', sidebar_height); postSetWidth = sidebar_height; } //Fix for the control sidebar height var controlSidebar = $($.AdminLTE.options.controlSidebarOptions.selector); if (typeof controlSidebar !== "undefined") { if (controlSidebar.height() > postSetWidth) $(".content-wrapper, .right-side").css('min-height', controlSidebar.height()); } } }, fixSidebar: function () { //Make sure the body tag has the .fixed class if (!$("body").hasClass("fixed")) { if (typeof $.fn.slimScroll != 'undefined') { $(".sidebar").slimScroll({destroy: true}).height("auto"); } return; } else if (typeof $.fn.slimScroll == 'undefined' && window.console) { window.console.error("Error: the fixed layout requires the slimscroll plugin!"); } //Enable slimscroll for fixed layout if ($.AdminLTE.options.sidebarSlimScroll) { if (typeof $.fn.slimScroll != 'undefined') { //Destroy if it exists $(".sidebar").slimScroll({destroy: 
true}).height("auto"); //Add slimscroll $(".sidebar").slimscroll({ height: ($(window).height() - $(".main-header").height()) + "px", color: "rgba(0,0,0,0.2)", size: "3px" }); } } } }; /* PushMenu() * ========== * Adds the push menu functionality to the sidebar. * * @type Function * @usage: $.AdminLTE.pushMenu("[data-toggle='offcanvas']") */ $.AdminLTE.pushMenu = { activate: function (toggleBtn) { //Get the screen sizes var screenSizes = $.AdminLTE.options.screenSizes; //Enable sidebar toggle $(document).on('click', toggleBtn, function (e) { e.preventDefault(); //Enable sidebar push menu if ($(window).width() > (screenSizes.sm - 1)) { if ($("body").hasClass('sidebar-collapse')) { $("body").removeClass('sidebar-collapse').trigger('expanded.pushMenu'); } else { $("body").addClass('sidebar-collapse').trigger('collapsed.pushMenu'); } } //Handle sidebar push menu for small screens else { if ($("body").hasClass('sidebar-open')) { $("body").removeClass('sidebar-open').removeClass('sidebar-collapse').trigger('collapsed.pushMenu'); } else { $("body").addClass('sidebar-open').trigger('expanded.pushMenu'); } } }); $(".content-wrapper").click(function () { //Enable hide menu when clicking on the content-wrapper on small screens if ($(window).width() <= (screenSizes.sm - 1) && $("body").hasClass("sidebar-open")) { $("body").removeClass('sidebar-open'); } }); //Enable expand on hover for sidebar mini if ($.AdminLTE.options.sidebarExpandOnHover || ($('body').hasClass('fixed') && $('body').hasClass('sidebar-mini'))) { this.expandOnHover(); } }, expandOnHover: function () { var _this = this; var screenWidth = $.AdminLTE.options.screenSizes.sm - 1; //Expand sidebar on hover $('.main-sidebar').hover(function () { if ($('body').hasClass('sidebar-mini') && $("body").hasClass('sidebar-collapse') && $(window).width() > screenWidth) { _this.expand(); } }, function () { if ($('body').hasClass('sidebar-mini') && $('body').hasClass('sidebar-expanded-on-hover') && $(window).width() > 
screenWidth) { _this.collapse(); } }); }, expand: function () { $("body").removeClass('sidebar-collapse').addClass('sidebar-expanded-on-hover'); }, collapse: function () { if ($('body').hasClass('sidebar-expanded-on-hover')) { $('body').removeClass('sidebar-expanded-on-hover').addClass('sidebar-collapse'); } } }; /* Tree() * ====== * Converts the sidebar into a multilevel * tree view menu. * * @type Function * @Usage: $.AdminLTE.tree('.sidebar') */ $.AdminLTE.tree = function (menu) { var _this = this; var animationSpeed = $.AdminLTE.options.animationSpeed; $(document).on('click', menu + ' li a', function (e) { //Get the clicked link and the next element var $this = $(this); var checkElement = $this.next(); //Check if the next element is a menu and is visible if ((checkElement.is('.treeview-menu')) && (checkElement.is(':visible')) && (!$('body').hasClass('sidebar-collapse'))) { //Close the menu checkElement.slideUp(animationSpeed, function () { checkElement.removeClass('menu-open'); //Fix the layout in case the sidebar stretches over the height of the window //_this.layout.fix(); }); checkElement.parent("li").removeClass("active"); } //If the menu is not visible else if ((checkElement.is('.treeview-menu')) && (!checkElement.is(':visible'))) { //Get the parent menu var parent = $this.parents('ul').first(); //Close all open menus within the parent var ul = parent.find('ul:visible').slideUp(animationSpeed); //Remove the menu-open class from the parent ul.removeClass('menu-open'); //Get the parent li var parent_li = $this.parent("li"); //Open the target menu and add the menu-open class checkElement.slideDown(animationSpeed, function () { //Add the class active to the parent li checkElement.addClass('menu-open'); parent.find('li.active').removeClass('active'); parent_li.addClass('active'); //Fix the layout in case the sidebar stretches over the height of the window _this.layout.fix(); }); } //if this isn't a link, prevent the page from being redirected if 
(checkElement.is('.treeview-menu')) { e.preventDefault(); } }); }; /* ControlSidebar * ============== * Adds functionality to the right sidebar * * @type Object * @usage $.AdminLTE.controlSidebar.activate(options) */ $.AdminLTE.controlSidebar = { //instantiate the object activate: function () { //Get the object var _this = this; //Update options var o = $.AdminLTE.options.controlSidebarOptions; //Get the sidebar var sidebar = $(o.selector); //The toggle button var btn = $(o.toggleBtnSelector); //Listen to the click event btn.on('click', function (e) { e.preventDefault(); //If the sidebar is not open if (!sidebar.hasClass('control-sidebar-open') && !$('body').hasClass('control-sidebar-open')) { //Open the sidebar _this.open(sidebar, o.slide); } else { _this.close(sidebar, o.slide); } }); //If the body has a boxed layout, fix the sidebar bg position var bg = $(".control-sidebar-bg"); _this._fix(bg); //If the body has a fixed layout, make the control sidebar fixed if ($('body').hasClass('fixed')) { _this._fixForFixed(sidebar); } else { //If the content height is less than the sidebar's height, force max height if ($('.content-wrapper, .right-side').height() < sidebar.height()) { _this._fixForContent(sidebar); } } }, //Open the control sidebar open: function (sidebar, slide) { //Slide over content if (slide) { sidebar.addClass('control-sidebar-open'); } else { //Push the content by adding the open class to the body instead //of the sidebar itself $('body').addClass('control-sidebar-open'); } }, //Close the control sidebar close: function (sidebar, slide) { if (slide) { sidebar.removeClass('control-sidebar-open'); } else { $('body').removeClass('control-sidebar-open'); } }, _fix: function (sidebar) { var _this = this; if ($("body").hasClass('layout-boxed')) { sidebar.css('position', 'absolute'); sidebar.height($(".wrapper").height()); $(window).resize(function () { _this._fix(sidebar); }); } else { sidebar.css({ 'position': 'fixed', 'height': 'auto' }); } }, 
_fixForFixed: function (sidebar) { sidebar.css({ 'position': 'fixed', 'max-height': '100%', 'overflow': 'auto', 'padding-bottom': '50px' }); }, _fixForContent: function (sidebar) { $(".content-wrapper, .right-side").css('min-height', sidebar.height()); } }; /* BoxWidget * ========= * BoxWidget is a plugin to handle collapsing and * removing boxes from the screen. * * @type Object * @usage $.AdminLTE.boxWidget.activate() * Set all your options in the main $.AdminLTE.options object */ $.AdminLTE.boxWidget = { selectors: $.AdminLTE.options.boxWidgetOptions.boxWidgetSelectors, icons: $.AdminLTE.options.boxWidgetOptions.boxWidgetIcons, animationSpeed: $.AdminLTE.options.animationSpeed, activate: function (_box) { var _this = this; if (!_box) { _box = document; // activate all boxes per default } //Listen for collapse event triggers $(_box).on('click', _this.selectors.collapse, function (e) { e.preventDefault(); _this.collapse($(this)); }); //Listen for remove event triggers $(_box).on('click', _this.selectors.remove, function (e) { e.preventDefault(); _this.remove($(this)); }); }, collapse: function (element) { var _this = this; //Find the box parent var box = element.parents(".box").first(); //Find the body and the footer var box_content = box.find("> .box-body, > .box-footer, > form >.box-body, > form > .box-footer"); if (!box.hasClass("collapsed-box")) { //Convert minus into plus element.children(":first") .removeClass(_this.icons.collapse) .addClass(_this.icons.open); //Hide the content box_content.slideUp(_this.animationSpeed, function () { box.addClass("collapsed-box"); }); } else { //Convert plus into minus element.children(":first") .removeClass(_this.icons.open) .addClass(_this.icons.collapse); //Show the content box_content.slideDown(_this.animationSpeed, function () { box.removeClass("collapsed-box"); }); } }, remove: function (element) { //Find the box parent var box = element.parents(".box").first(); box.slideUp(this.animationSpeed); } }; } /* 
------------------ * - Custom Plugins - * ------------------ * All custom plugins are defined below. */ /* * BOX REFRESH BUTTON * ------------------ * This is a custom plugin to use with the component BOX. It allows you to add * a refresh button to the box. It converts the box's state to a loading state. * * @type plugin * @usage $("#box-widget").boxRefresh( options ); */ (function ($) { "use strict"; $.fn.boxRefresh = function (options) { // Render options var settings = $.extend({ //Refresh button selector trigger: ".refresh-btn", //File source to be loaded (e.g: ajax/src.php) source: "", //Callbacks onLoadStart: function (box) { return box; }, //Right after the button has been clicked onLoadDone: function (box) { return box; } //When the source has been loaded }, options); //The overlay var overlay = $('
'); return this.each(function () { //if a source is specified if (settings.source === "") { if (window.console) { window.console.log("Please specify a source first - boxRefresh()"); } return; } //the box var box = $(this); //the button var rBtn = box.find(settings.trigger).first(); //On trigger click rBtn.on('click', function (e) { e.preventDefault(); //Add loading overlay start(box); //Perform ajax call box.find(".box-body").load(settings.source, function () { done(box); }); }); }); function start(box) { //Add overlay and loading img box.append(overlay); settings.onLoadStart.call(box); } function done(box) { //Remove overlay and loading img box.find(overlay).remove(); settings.onLoadDone.call(box); } }; })(jQuery); /* * EXPLICIT BOX CONTROLS * ----------------------- * This is a custom plugin to use with the component BOX. It allows you to activate * a box inserted in the DOM after the app.js was loaded, toggle and remove box. * * @type plugin * @usage $("#box-widget").activateBox(); * @usage $("#box-widget").toggleBox(); * @usage $("#box-widget").removeBox(); */ (function ($) { 'use strict'; $.fn.activateBox = function () { $.AdminLTE.boxWidget.activate(this); }; $.fn.toggleBox = function(){ var button = $($.AdminLTE.boxWidget.selectors.collapse, this); $.AdminLTE.boxWidget.collapse(button); }; $.fn.removeBox = function(){ var button = $($.AdminLTE.boxWidget.selectors.remove, this); $.AdminLTE.boxWidget.remove(button); }; })(jQuery); /* * TODO LIST CUSTOM PLUGIN * ----------------------- * This plugin depends on iCheck plugin for checkbox and radio inputs * * @type plugin * @usage $("#todo-widget").todolist( options ); */ (function ($) { 'use strict'; $.fn.todolist = function (options) { // Render options var settings = $.extend({ //When the user checks the input onCheck: function (ele) { return ele; }, //When the user unchecks the input onUncheck: function (ele) { return ele; } }, options); return this.each(function () { if (typeof $.fn.iCheck != 'undefined') { 
$('input', this).on('ifChecked', function () { var ele = $(this).parents("li").first(); ele.toggleClass("done"); settings.onCheck.call(ele); }); $('input', this).on('ifUnchecked', function () { var ele = $(this).parents("li").first(); ele.toggleClass("done"); settings.onUncheck.call(ele); }); } else { $('input', this).on('change', function () { var ele = $(this).parents("li").first(); ele.toggleClass("done"); if ($('input', ele).is(":checked")) { settings.onCheck.call(ele); } else { settings.onUncheck.call(ele); } }); } }); }; }(jQuery)); ================================================ FILE: web/static/js/plot_monitor.js ================================================ var mem_usedp = 0; var cpu_usedp = 0; var is_running = true; var ingress_rate = 0; var egress_rate = 0; var ingress_rate_limit = 0; var egress_rate_limit = 0; function processMemData(data) { } function getMemY() { return mem_usedp*100; } function processCpuData(data) { } function getCpuY() { return cpu_usedp*100; } function processRate(data) { } function getIngressRateP() { //alert(ingress_rate*8 / 1000.0); return ingress_rate * 8 / 1000.0; } function getEgressRateP() { return egress_rate * 8 / 1000.0; } function plot_graph(container,url,processData,getY,fetchdata=true, maxy=110) { //var container = $("#flot-line-chart-moving"); // Determine how many data points to keep based on the placeholder's initial size; // this gives us a nice high-res plot while avoiding more than one point per pixel. var maximum = container.outerWidth() / 2 || 300; // var data = []; function getBaseData() { while (data.length < maximum) { data.push(0) } // zip the generated y values with the x values var res = []; for (var i = 0; i < data.length; ++i) { res.push([i, data[i]]) } return res; } function getData() { if (data.length) { data = data.slice(1); } if (data.length < maximum) { if(fetchdata) $.post(url,{},processData,"json"); var y = getY(); data.push(y < 0 ? 0 : y > maxy ? 
maxy : y); } // zip the generated y values with the x values var res = []; for (var i = 0; i < data.length; ++i) { res.push([i, data[i]]) } return res; } series = [{ data: getBaseData(), lines: { fill: true } }]; var plot = $.plot(container, series, { grid: { color: "#999999", tickColor: "#D4D4D4", borderWidth:0, minBorderMargin: 20, labelMargin: 10, backgroundColor: { colors: ["#ffffff", "#ffffff"] }, margin: { top: 8, bottom: 20, left: 20 }, markings: function(axes) { var markings = []; var xaxis = axes.xaxis; for (var x = Math.floor(xaxis.min); x < xaxis.max; x += xaxis.tickSize * 2) { markings.push({ xaxis: { from: x, to: x + xaxis.tickSize }, color: "#fff" }); } return markings; } }, colors: ["#1ab394"], xaxis: { tickFormatter: function() { return ""; } }, yaxis: { min: 0, max: maxy }, legend: { show: true } }); // Update the random dataset at 25FPS for a smoothly-animating chart setInterval(function updateRandom() { series[0].data = getData(); plot.setData(series); plot.draw(); }, 1000); } var host = window.location.host; var node_name = $("#node_name").html(); var masterip = $("#masterip").html(); var url = "//" + host + "/monitor/" + masterip + "/vnodes/" + node_name; function num2human(data) { units=['','K','M','G','T']; tempdata = data/1.0; //return tempdata; for(var i = 1; i < units.length; ++i) { if( tempdata / 1000.0 > 1) tempdata = tempdata/1000.0; else return tempdata.toFixed(2) + units[i-1]; } return tempdata.toFixed(2) + units[4]; } function processInfo() { $.post(url+"/info/",{},function(data){ basic_info = data.monitor.basic_info; state = basic_info.State; if(state == 'STOPPED') { is_running = false; $("#con_state").html("
Stopped
"); $("#con_ip").html("--"); } else { is_running = true; $("#con_state").html("
Running
"); $("#con_ip").html(basic_info.IP); } var total = parseInt(basic_info.RunningTime); var hour = Math.floor(total / 3600); var min = Math.floor(total % 3600 / 60); var secs = Math.floor(total % 3600 % 60); $("#con_time").html(hour+"h "+min+"m "+secs+"s"); $("#con_billing").html(""+basic_info.billing+" "); $("#con_billingthishour").html(""+basic_info.billing_this_hour.total+" "); if(is_running) { cpu_usedp = data.monitor.cpu_use.usedp; var val = (data.monitor.cpu_use.val).toFixed(2); var unit = data.monitor.cpu_use.unit; var quota = data.monitor.cpu_use.quota.cpu; var quotaout = "("+quota; if(quota == 1) quotaout += " Core)"; else quotaout += " Cores)"; $("#con_cpu").html(val +" "+ unit+"
"+quotaout); mem_usedp = data.monitor.mem_use.usedp; var usedp = data.monitor.mem_use.usedp; unit = data.monitor.mem_use.unit; var quota = data.monitor.mem_use.quota.memory/1024.0; val = data.monitor.mem_use.val; var out = "("+val+unit+"/"+quota.toFixed(2)+"MiB)"; $("#con_mem").html((usedp/0.01).toFixed(2)+"%
"+out); } else { cpu_usedp = 0; $("#con_cpu").html("--"); mem_usedp = 0; $("#con_mem").html("--"); } //processDiskData var diskuse = data.monitor.disk_use; var usedp = diskuse.percent; var total = diskuse.total/1024.0/1024.0; var used = diskuse.used/1024.0/1024.0; var detail = "("+used.toFixed(2)+"MiB/"+total.toFixed(2)+"MiB)"; $("#con_disk").html(usedp+"%
"+detail); //processNetStats var net_stats = data.monitor.net_stats; if(!$.isEmptyObject(net_stats)) { var in_rate = parseInt(net_stats.bytes_recv_per_sec); var out_rate = parseInt(net_stats.bytes_sent_per_sec); ingress_rate = in_rate; egress_rate = out_rate; $("#net_in_rate").html(num2human(in_rate)+"Bps"); $("#net_out_rate").html(num2human(out_rate)+"Bps"); $("#net_in_bytes").html(num2human(net_stats.bytes_recv)+"B"); $("#net_out_bytes").html(num2human(net_stats.bytes_sent)+"B"); $("#net_in_packs").html(net_stats.packets_recv); $("#net_out_packs").html(net_stats.packets_sent); $("#net_in_err").html(net_stats.errout); $("#net_out_err").html(net_stats.errin); $("#net_in_drop").html(net_stats.dropout); $("#net_out_drop").html(net_stats.dropin); } else { ingress_rate = 0; egress_rate = 0; $("#net_in_rate").html("--"); $("#net_out_rate").html("--"); $("#net_in_bytes").html("--"); $("#net_out_bytes").html("--"); $("#net_in_packs").html("--"); $("#net_out_packs").html("--"); $("#net_in_err").html("--"); $("#net_out_err").html("--"); $("#net_in_drop").html("--"); $("#net_out_drop").html("--"); } },"json"); } function plot_net(host,monitorurl) { var url = "//" + host + "/user/selfQuery/"; $.post(url,{},function(data){ ingress_rate_limit = parseInt(data.groupinfo.input_rate_limit); egress_rate_limit = parseInt(data.groupinfo.output_rate_limit); if(ingress_rate_limit == 0) ingress_rate_limit = egress_rate_limit*1.5; plot_graph($("#ingress-chart"), monitorurl, processRate, getIngressRateP,false,ingress_rate_limit); plot_graph($("#egress-chart"), monitorurl, processRate, getEgressRateP,false,egress_rate_limit*1.5); },"json"); } setInterval(processInfo,1000); plot_graph($("#mem-chart"),url + "/mem_use/",processMemData,getMemY,false); plot_graph($("#cpu-chart"),url + "/cpu_use/",processCpuData,getCpuY,false); plot_net(host, url + "/net_stats/"); ================================================ FILE: web/static/js/plot_monitorReal.js 
================================================ var used = 0; var total = 0; var idle = 0; var disk_usedp = 0; var count = 0; var Ki = 1024; var is_running = true; function processMemData(data) { if(is_running) { used = data.monitor.meminfo.used; total = data.monitor.meminfo.total; var used2 = ((data.monitor.meminfo.used)/Ki).toFixed(2); var total2 = ((data.monitor.meminfo.total)/Ki).toFixed(2); var free2 = ((data.monitor.meminfo.free)/Ki).toFixed(2); $("#mem_used").html(used2); $("#mem_total").html(total2); $("#mem_free").html(free2); } else { total = 0; $("#mem_used").html("--"); $("#mem_total").html("--"); $("#mem_free").html("--"); } } function getMemY() { if(total == 0) return 0; else return (used/total)*100; } function processCpuData(data) { if(is_running) { idle = data.monitor.cpuinfo.idle; var us = data.monitor.cpuinfo.user; var sy = data.monitor.cpuinfo.system; var wa = data.monitor.cpuinfo.iowait; $("#cpu_user").html(us); $("#cpu_system").html(sy); $("#cpu_iowait").html(wa); $("#cpu_idle").html(idle); } else { idle = 100; $("#cpu_user").html("--"); $("#cpu_system").html("--"); $("#cpu_iowait").html("--"); $("#cpu_idle").html("--"); } } function getCpuY() { count++; //alert(idle); if(count <= 3 && idle <= 10) return 0; else return (100-idle); } function processDiskData(data) { var vals = data.monitor.diskinfo; disk_usedp = vals[0].usedp; for(var idx = 0; idx < vals.length; ++idx) { var used = (vals[idx].used/Ki/Ki).toFixed(2); var total = (vals[idx].total/Ki/Ki).toFixed(2); var free = (vals[idx].free/Ki/Ki).toFixed(2); var usedp = (vals[idx].percent); var name = "#disk_" + (idx+1) + "_"; $(name+"device").html(vals[idx].device); $(name+"used").html(used); $(name+"total").html(total); $(name+"free").html(free); $(name+"usedp").html(usedp); } } function getDiskY() { return disk_usedp; } function plot_graph(container,url,processData,getY) { //var container = $("#flot-line-chart-moving"); // Determine how many data points to keep based on the placeholder's 
initial size; // this gives us a nice high-res plot while avoiding more than one point per pixel. var maximum = container.outerWidth() / 2 || 300; // var data = []; function getBaseData() { while (data.length < maximum) { data.push(0) } // zip the generated y values with the x values var res = []; for (var i = 0; i < data.length; ++i) { res.push([i, data[i]]) } return res; } function getData() { if (data.length) { data = data.slice(1); } if (data.length < maximum) { $.post(url,{user:"root",key:"unias"},processData,"json"); var y = getY(); data.push(y < 0 ? 0 : y > 100 ? 100 : y); } // zip the generated y values with the x values var res = []; for (var i = 0; i < data.length; ++i) { res.push([i, data[i]]) } return res; } series = [{ data: getBaseData(), lines: { fill: true } }]; var plot = $.plot(container, series, { grid: { color: "#999999", tickColor: "#D4D4D4", borderWidth:0, minBorderMargin: 20, labelMargin: 10, backgroundColor: { colors: ["#ffffff", "#ffffff"] }, margin: { top: 8, bottom: 20, left: 20 }, markings: function(axes) { var markings = []; var xaxis = axes.xaxis; for (var x = Math.floor(xaxis.min); x < xaxis.max; x += xaxis.tickSize * 2) { markings.push({ xaxis: { from: x, to: x + xaxis.tickSize }, color: "#fff" }); } return markings; } }, colors: ["#1ab394"], xaxis: { tickFormatter: function() { return ""; } }, yaxis: { min: 0, max: 110 }, legend: { show: true } }); // Update the random dataset at 25FPS for a smoothly-animating chart setInterval(function updateRandom() { series[0].data = getData(); plot.setData(series); plot.draw(); }, 1000); } var host = window.location.host; var com_ip = $("#com_ip").html(); var masterip = $("#masterip").html(); var url = "//" + host + "/monitor/" + masterip + "/hosts/"+com_ip; function processStatus() { $.post(url+"/status/",{},function(data){ var state = data.monitor.status; if(state == 'RUNNING') is_running = true; else is_running = false; },"json"); } setInterval(processStatus,1000); plot_graph($("#mem-chart"), 
url + "/meminfo/" ,processMemData,getMemY); plot_graph($("#cpu-chart"), url + "/cpuinfo/",processCpuData,getCpuY); //plot_graph($("#disk-chart"), url + "/diskinfo",processDiskData,getDiskY); $.post(url+"/diskinfo/",{},processDiskData,"json"); ================================================ FILE: web/templates/addCluster.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Create Workspace{% endblock %} {% block css_src %} {% endblock %} {% block panel_title %}Workspace Info{% endblock %} {% block panel_list %} {% endblock %}
{% block content %}

Workspace Add



{{masterdesc}}


{% for image in images['private'] %} {% endfor %} {% for p_user,p_images in images['public'].items() %} {% for image in p_images %} {% endfor %} {% endfor %}
ImageName Type Owner Size Description Choose
base public docklet -- A base image for you
{{image['name']}} private {{user}} {{image['size_format']}} {{image['description']}}
{{image['name']}} public {{p_user}} {{image['size_format']}} {{image['description']}}
{{usage['cpu']}}CORE/{{quota['cpu']}}CORE
{{usage['memory']}}MB/{{quota['memory']}}MB
{{usage['disk']}} MB/{{quota['disk']}}MB (minimum value is the image size + 100)

{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/base_AdminLTE.html ================================================ {% block title %}Docklet | Workspace{% endblock %} {%block css_src %}{% endblock %}

{% block panel_title %}Workspace{% endblock %}

{% block panel_list %} {% endblock %}
{% block content %} {% endblock %}
{% if mysession['status'] == 'init' %} {% endif %} {% if mysession['status'] == 'applying' %} {% endif %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/batch/batch_admin_list.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Batch Job{% endblock %} {% block panel_title %}Batch Job{% endblock %} {% block css_src %} {% endblock %} {% block panel_list %} {% endblock %} {% block content %}

Batch Job List

{% for master in masterips %} {% for job_info in job_list[master.split('@')[0]] %} {% endfor %} {% endfor %}
{% for master in masterips %} {% for job_info in job_list[master.split('@')[0]] %} {% if job_info['status'] == 'done' or job_info['status'] == 'failed' or job_info['status'] == 'stopping' or job_info['status'] == 'stopped'%} {% else %} {% endif %} {% endfor %} {% endfor %}
Location Username ID Name Status Operations Create Time End Time billing Stdout and Stderr Detailed Info
{{ master.split('@')[1] }} {{ job_info['username'] }} {{ job_info['job_id'] }} {{ job_info['job_name'] }} {{ job_info['status'] }} {{ job_info['create_time'] }} {{ job_info['end_time'] }} {{ job_info['billing'] }} Get Output
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/batch/batch_create.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Create Batch Job{% endblock %} {% block css_src %} {% endblock %} {% block panel_title %}Batch Job Info{% endblock %} {% block panel_list %} {% endblock %}
{% block content %}

Batch Job Create





{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/batch/batch_info.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Batch Job Info{% endblock %} {% block panel_title %}Info for {{ jobinfo['job_id'] }}{% endblock %} {% block css_src %} {% endblock %} {% block panel_list %} {% endblock %} {% block content %}

Overview

Job ID Name Priority Status Create Time End Time Billing
{{ jobinfo['job_id'] }} {{ jobinfo['job_name'] }} {{ jobinfo['priority'] }} {{ jobinfo['status'] }} {{ jobinfo['create_time'] }} {{ jobinfo['end_time'] }} {{ jobinfo['billing'] }}

Tasks Overview

{% for task in jobinfo['tasks'] %} {% if task['status'] == 'scheduling' %} {% else %} {% endif %} {% endfor %}
Task Index Status Failed Reason(if fails) Tried Times Start Time End Time Total Running Time Billing
{{ task['idx'] }}{{ task['status'] }}({{ task['order'] }} scheduling tasks before){{ task['status'] }}{{ task['failed_reason'] }} {{ task['tried_times'] }} {{ task['start_time'] }} {{ task['end_time'] }} {{ task['running_time'] }} s {{ task['billing'] }}

Tasks Configs

{% for task in jobinfo['tasks'] %}
{% if task['config']['runon'] == 'all' %} {% else %} {% endif %} {% if 'atSameTime' in task['config'].keys() %} {% else %} {% endif %} {% if task['config']['image'] == 'base_base_base' %} {% else %} {% endif %}
CPU Cores Memory GPU Disk VNode Number Max Retry Times
{{ task['config']['cpuSetting'] }} {{ task['config']['memorySetting'] }} MB {{ task['config']['gpuSetting'] }} {{ task['config']['diskSetting'] }} MB {{ task['config']['vnodeCount'] }} {{ task['config']['retryCount'] }}
Running Path Expire Time Stdout Redirect Path Stderr Redirect Path Dependency Command
{{ task['config']['srcAddr'] }} {{ task['config']['expTime'] }} seconds {{ task['config']['stdOutRedPth'] }} {{ task['config']['stdErrRedPth'] }} {{ task['config']['dependency'] }} {{ task['config']['command'] }}
Run on Start at the Same Time Image Name Image Owner Image Type
all vnodesmaster vnodeTrueFalsebase docklet public{{ task['config']['image'].split('_')[0] }} {{ task['config']['image'].split('_')[1] }} {{ task['config']['image'].split('_')[2] }}
{% if 'mapping' in task['config'].keys() %}
{% for key in task['config']['mapping'].keys() %} {% endfor %}
Provider Bucket Name AccessKey ID Endpoint Mount Path
{{ task['config']['mapping'][key]['mappingProvider'] }} {{ task['config']['mapping'][key]['mappingBucketName'] }} {{ task['config']['mapping'][key]['mappingAccessKey'] }} {{ task['config']['mapping'][key]['mappingEndpoint'] }} {{ task['config']['mapping'][key]['mappingMountpath'] }}
{% endif %}
{% endfor %}
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/batch/batch_list.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Batch Job{% endblock %} {% block panel_title %}Batch Job{% endblock %} {% block css_src %} {% endblock %} {% block panel_list %} {% endblock %} {% block content %}

Batch Job List

{% for master in masterips %} {% for job_info in job_list[master.split('@')[0]] %} {% endfor %} {% endfor %}
{% for master in masterips %} {% for job_info in job_list[master.split('@')[0]] %} {% if job_info['status'] == 'done' or job_info['status'] == 'failed' or job_info['status'] == 'stopping' or job_info['status'] == 'stopped'%} {% else %} {% endif %} {% endfor %} {% endfor %}
Location ID Name Status Operations Create Time End Time billing Stdout and Stderr Detailed Info
{{ master.split('@')[1] }} {{ job_info['job_id'] }} {{ job_info['job_name'] }} {{ job_info['status'] }} {{ job_info['create_time'] }} {{ job_info['end_time'] }} {{ job_info['billing'] }} Get Output
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/batch/batch_output.html ================================================ Docklet | Batch {{ issue }}: {{ jobid }}/{{ taskid }}/{{ vnodeid }}

Jobid: {{ jobid }}

Taskid: {{ taskid }}

VNodeid: {{ vnodeid }}

The output of {{ issue }} is updated every 2 seconds.


{{ output }}
================================================ FILE: web/templates/beansapplication.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Beans Application{% endblock %} {% block panel_title %}Beans Application{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

All Applications

{% for application in applications %} {% endfor %}
Application ID Username Number Submission Time Reason Status
{{ application.id }} {{ application.username }} {{ application.number }} {{ application.time }} {{ application.reason }} {{ application.status }}
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/cloud.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Cloud{% endblock %} {% block panel_title %}Cloud{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}
{% for master,info in settings.items() %} {% if loop.index == 1 %}
{% else %}
{% endif %}

Cloud Setting

{% endfor %} {% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/config.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Config{% endblock %} {% block panel_title %}Config{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %} {% for master in allclusters %} {% for clustername, clusterinfo in allclusters[master].items() %}

WorkSpace Name: {{ clustername }} @ {{master.split("@")[1]}}

{% endfor %} {% endfor %}

Image Info

{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/create_notification.html ================================================ {% extends "base_AdminLTE.html" %} {% block title %}Docklet | Create Notification{% endblock %} {% block panel_title %}Add New Notifications{% endblock %} {% block panel_list %} {% endblock %} {##} {% block content %}

Add New Notifications

{% for group_name in groups %} {% endfor %}
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/dashboard.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Workspace{% endblock %} {% block panel_title %}Workspace{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

Workspace

{% for master in allclusters %} {% for cluster in allclusters[master] %} {% if cluster['status'] == 'running' %} {% elif cluster['status'] == 'stopped' %} {% else %} {% endif %} {% endfor %} {% endfor %}
ID Name Status Operation WorkSpace Location
{{ cluster['id'] }} {{ cluster['name'] }}
Running
Stopped
Error
{{master.split("@")[1]}}
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/description.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Description{% endblock %} {% block panel_title %}Description{% endblock %} {% block panel_list %}{% endblock %} {% block content %}
{{description}}
{% endblock %} ================================================ FILE: web/templates/error/401.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Error{% endblock %} {% block panel_title %}401 Error Page{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

401


Unauthorized Action

Sorry, but you do not have the authorization for that action. You can go back to the dashboard or log out

{% endblock %} ================================================ FILE: web/templates/error/500.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Error{% endblock %} {% block panel_title %}500 Error Page{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

500


{{ title }}

{{reason|safe}}

{% endblock %} ================================================ FILE: web/templates/error.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Error{% endblock %} {% block panel_title %}Error{% endblock %} {% block panel_list %}{% endblock %} {% block content %}
{{message}}
{% endblock %} ================================================ FILE: web/templates/home.template ================================================ Docklet | Home

Workspace=Cluster+Service+Data


Package services and data on a virtual cluster as a virtual compute environment for your work.
This is your Workspace!

feature-workspace

feature-app

Click and Go


Distributed or single node? Never mind! Click it just like starting an app on your smartphone, and your workspace is ready for you.


All in Web


All you need is a web browser. Compute in web, code in web, plot in web, anything in web! You can get to work anytime and anywhere via the Internet.

feature-web

================================================ FILE: web/templates/listcontainer.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Container{% endblock %} {% block panel_title %}ContainerInfo{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

Cluster Name: {{ clustername }}

{% for container in containers %} {% if status == 'stopped' %} {% else %} {% endif %} {% endfor %}
Node ID Node Name IP Address Status Last Save Image Detail Flush Save
{{ loop.index }} {{ container['containername'] }} {{ container['ip'] }}
Stopped
Running
{{ container['lastsave'] }} {{ container['image'] }} Detail Flush
{% endblock %} ================================================ FILE: web/templates/login.html ================================================ Docklet | Login ================================================ FILE: web/templates/logs.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Logs{% endblock %} {% block panel_title %}Logs{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

Logs

{% for filename in logs %} {% endfor %}
{% endblock %} ================================================ FILE: web/templates/monitor/history.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | History{% endblock %} {% block panel_title %}History of All Created VNodes{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

All Created VNodes

{% for master in allvnodes %} {% for vnode in allvnodes[master] %} {% endfor %} {% endfor %}
NO VNode name Total billing Location History
{{ loop.index }} {{ vnode.name }} {{ vnode.billing }} beans {{ master.split("@")[1] }} History
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/monitor/historyVNode.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | History{% endblock %} {% block panel_title %}History of
{{ vnode_name }}
{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

History

{% for record in history %} {% endfor %}
History ID Action Running Time Cpu Time Billing Action Time
{{ record['id'] }} {{ record['action'] }} {{ record['runningtime'] }} seconds {{ record['cputime'] }} seconds {{ record['billing'] }} beans {{ record['actionTime'] }}
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/monitor/hosts.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Hosts{% endblock %} {% block panel_title %}Hosts Info{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}
{% for master in allmachines %} {% if loop.index == 1 %}
{% else %}
{% endif %}

{{master.split("@")[1]}}

{% for phym in allmachines[master] %} {% if phym['status'] == 'STOPPED' %} {% else %} {% endif %} {% endfor %}
NO IP Address Status Nodes running Cpu used Mem used Disk used Summary Operation
{{ loop.index }} {{ phym['ip'] }}
Stopped
Running
/ {{ phym['containers']['total'] }} -- -- -- Realtime
{% endfor %} {% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/monitor/hostsConAll.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Hosts{% endblock %} {% block panel_title %}Node list for {{ com_ip }}{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

Total Nodes

{% for container in containerslist %} {% if container['State'] == 'STOPPED' %} {% else %} {% endif %} {% endfor %}
NO Name Owner Owner's Truename State PID IP Address Cpu used %Cpu Mem used Summary
{{ loop.index }} {{ container['Name'] }} {{ container['owner']['username'] }} {{ container['owner']['truename'] }}
Stopped
-- --
Running
{{ container['PID'] }} {{ container['IP'] }}-- -- -- Realtime
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/monitor/hostsRealtime.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Hosts{% endblock %} {% block panel_title %}Summary for
{{ com_ip }}
{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

CPU info

{% for processor in processors %} {% endfor %}
Processor ID Model name physical id core id cpu MHz cache size
{{ processor['processor'] }} {{ processor['model name']}} {{ processor['physical id']}} {{ processor['core id']}} {{ processor['cpu MHz']}} {{ processor['cache size']}}

OS info

OS name {{ OSinfo['platform']}}
OS node name {{ OSinfo['node']}}
OS kernel release {{ OSinfo['release']}}
OS kernel version {{ OSinfo['version']}}
OS kernel machine {{ OSinfo['machine']}}

Cpu and Memory Status

Cpu(%) Memory(MiB)
user system iowait idle used free total
-- -- -- -- -- -- --

Disk Status

{% for diskinfo in diskinfos %} {% endfor %}
Disk info
device used(MiB) free(MiB) total(MiB) used percent(%)
-- -- -- -- --

Memory Used(%):

CPU Used(%):

{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/monitor/monitorUserAll.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | MonitorUser{% endblock %} {% block panel_title %}Users Info{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

All Users Info

{% for user in userslist %} {% endfor %}
NO Name Running/Total Clusters Running/Total Containers Register Time Last Login Frequency Detail
{{ loop.index }} {{ user['name'] }} {{ user['clustercnt']['clurun'] }}/{{ user['clustercnt']['clutotal'] }} {{ user['clustercnt']['conrun'] }}/{{ user['clustercnt']['contotal'] }} -- -- -- Clusters
{% endblock %} ================================================ FILE: web/templates/monitor/monitorUserCluster.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Monitor{% endblock %} {% block panel_title %}NodeInfo for {{ muser }}{% endblock %} {% block panel_list %} {% endblock %} {% block content %} {% for cluster in clusters %}

Cluster Name: {{ cluster }}

{% for container in containers[cluster]['containers'] %} {% if containers[cluster]['status'] == 'stopped' %} {% else %} {% endif %} {% endfor %}
Node ID Node Name IP Address Status Create Time detail
{{ loop.index }} {{ container['containername'] }} {{ container['ip'] }}
Stopped
Running
xxxxx Detail
{% endfor %} {% endblock %} ================================================ FILE: web/templates/monitor/status.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Status{% endblock %} {% block panel_title %}Workspace VCluster Status{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

Your Quotas

{% for quotaname in quotanames %} {% endfor %} {% for quotaname in quotanames %} {% if quotaname == 'cpu' %} {% if quotas['cpu'] == '1' %} {% else %} {% endif %} {% elif quotaname == 'memory' or quotaname == 'disk' %} {% elif quotaname == 'idletime' %} {% elif quotaname == 'input_rate_limit' or quotaname == 'output_rate_limit'%} {% elif quotaname == 'data' %} {% else %} {% endif %} {% endfor %}
{{ quotaname }}
{{ quotas['cpu'] }} Core{{ quotas['cpu'] }} Cores{{ quotas[quotaname] }} MB{{ quotas[quotaname] }} hours{{ quotas[quotaname] }} kbps{{ quotas[quotaname] }} GB{{ quotas[quotaname] }}
{% for master in allcontainers %}

Total Network Statistics @ {{master.split("@")[1]}}

Total Bytes Sent Total Bytes Received Total Bytes Transferred Network Billings
-- -- -- --
{% for clustername, clusterinfo in allcontainers[master].items() %}

VCluster Name: {{ clustername }} @ {{master.split("@")[1]}}

{% for container in clusterinfo['containers'] %} {% endfor %} {% for container in clusterinfo['containers'] %} {% if clusterinfo['status'] == 'stopped' %} {% else %} {% endif %} {% endfor %}
Node ID Node Name IP Address Status Running Time Cpu Usage Mem Usage Disk Usage Total Billing Billing This Running Hour Summary
{{ loop.index }} {{ container['containername'] }} {{ container['ip'] }}
Stopped
Running
-- -- -- -- -- -- Realtime
{% endfor %} {% endfor %} {% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/monitor/statusRealtime.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Node Summary{% endblock %} {% block panel_title %}Summary for
{{ node_name }}
{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

Current Status of {{ container['Name'] }}

{% if container['State'] == 'STOPPED' %} {% else %} {% endif %}
State IP Address Running Time CPU Usage Mem Usage Disk Usage Total Billing Billing This Running Hour
Stopped
--
Running
{{ container['IP'] }}{{ container['RunningTime'] }}s -- -- -- -- --

Network Statistics

Ingress Egress
Rate Received Bytes Received Packets Error Packets Dropped Packets Rate Sent Bytes Sent Packets Error Packets Dropped Packets
-- -- -- -- -- -- -- -- -- --

Memory Used(%):

CPU Used(%):

Ingress Rate(kbps):

Egress Rate(kbps):

{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/notification.html ================================================ {% extends "base_AdminLTE.html" %} {% block title %}Docklet | Notification{% endblock %} {% block panel_title %}Notifications{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {##} {% block content %}

Notifications

{% for notify in notifications %} {% endfor %}
ID Title Content Groups Create Date Status Command
{{ notify['id'] }} {{ notify['title'] }} {{ notify['content']|truncate(30) }} {% for group_name in notify['groups'] %}{{ group_name }}  {% endfor %} {{ notify['create_date'] }} {{ notify['status'] }} details  edit  delete
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/notification_info.html ================================================ {% extends "base_AdminLTE.html" %} {% block title %}Docklet | Notification{% endblock %} {% block panel_title %}Notifications{% endblock %} {% block panel_list %} {% endblock %} {##} {% block content %} {% for notify in notifies %}

{{ notify['title'] }}

{{ notify['content'] }}
{% endfor %} {% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/opfailed.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Failed{% endblock %} {% block panel_title %}Failed{% endblock %} {% block panel_list %} {% endblock %} {% block content %} {% endblock %} ================================================ FILE: web/templates/opsuccess.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Success{% endblock %} {% block panel_title %}Success{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

SUCCESS



{{message}}


Click Here Back To The Workspace
{% endblock %} ================================================ FILE: web/templates/register.html ================================================ Docklet | Login ================================================ FILE: web/templates/saveconfirm.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Confirm{% endblock %} {% block panel_title %}Confirm{% endblock %} {% block css_src %} .hide { display:none; } {% endblock %} {% block panel_list %} {% endblock %} {% block content %}

{% endblock %} ================================================ FILE: web/templates/settings.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Settings{% endblock %} {% block panel_title %}Settings{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

Quota

{% for quota in quotas %} {% endfor %} {% for group in groups %} {% for quota in quotas %} {% endfor %} {% endfor %}
Name {{ quota['name'] }} Command
{{ group['name'] }} {{ group['quotas'][quota['name']] }} Edit  {% if group['name'] in [ "root", "primary", "admin", "foundation" ] %} Delete  {% else %} Delete  {% endif %} {% if group['name'] == defaultgroup %} {% endif %}

Update Base Image

{% for image in root_image %} {% endfor %}
ImageName CreateTime Description Operation
{{image['name']}} {{image['time']}} {{image['description']}}

Container Default Setting

CORE
MB
MB

Modify Settings

when an activating request is sent, an e-mail will be sent to this address to remind the admin.

if this address is "", no email will be sent to admin.

the e-mail address to send activating e-mail to user

if this address is "", no email will be sent out.

whether allow user to register a new account

if the value is True, it will allow.

whether to start the approval robot that will approve beans applications from users automatically

if the value is True, it will allow.

{% for field in ["docklet"] %}
{% if field == "docklet" %}

Docklet Config

{% else %}

Container Config

{% endif %}
{% endfor %}

Container Config

{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/user/activate.html ================================================ Docklet | Login ================================================ FILE: web/templates/user/info.html ================================================ {% extends 'base_AdminLTE.html' %} {% block title %}Docklet | Information Modify{% endblock %} {% block css_src %} {% endblock %} {% block panel_title %}Detail for User Information{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

User Info

{% if info['auth_method'] == 'local' %} {% endif %}
User Name {{ info['username'] }}
Nickname {{ info['nickname'] }}
Description {{ info['description'] }}
Truename {{ info['truename'] }}
Status {{ info['status'] }}
E-mail {{ info['e_mail'] }}
Department {{ info['department'] }}
ID Number {{ info['student_number'] }}
Telephone {{ info['tel'] }}
password
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/templates/user/mailservererror.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | Error{% endblock %} {% block panel_title %}500 Error Page{% endblock %} {% block panel_list %} {% endblock %} {% block content %}

500


Internal Server Error

Please examine your mail server config (now exim4). You can go back to dashboard or log out

{% endblock %} ================================================ FILE: web/templates/user_list.html ================================================ {% extends "base_AdminLTE.html"%} {% block title %}Docklet | UserList{% endblock %} {% block panel_title %}UserList{% endblock %} {% block panel_list %} {% endblock %} {% block css_src %} {% endblock %} {% block content %}

User List

ID User Name E_mail Tel RegisterDate Status Group Beans Command

Processing Beans Applications

{% for application in applications %} {% endfor %}
Application ID Username Number Submission Time Reason Command
{{ application.id }} {{ application.username }} {{ application.number }} beans {{ application.time }} {{ application.reason }} Agree     Reject
{% endblock %} {% block script_src %} {% endblock %} ================================================ FILE: web/web.py ================================================ #!/usr/bin/python3 import json import os import getopt import sys, inspect this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])) src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"..", "src"))) if src_folder not in sys.path: sys.path.insert(0, src_folder) # must first init loadenv from utils import tools, env config = env.getenv("CONFIG") tools.loadenv(config) from webViews.log import initlogging initlogging("docklet-web") from webViews.log import logger from flask import Flask, request, session, render_template, redirect, send_from_directory, make_response, url_for, abort from flask_wtf.csrf import CsrfProtect from webViews.dashboard import dashboardView from webViews.user.userlist import userlistView, useraddView, usermodifyView, userdataView, userqueryView from webViews.notification.notification import CreateNotificationView, NotificationView, QuerySelfNotificationsView, \ QueryNotificationView, ModifyNotificationView, DeleteNotificationView from webViews.user.userinfo import userinfoView from webViews.user.userActivate import userActivateView from webViews.syslogs import logsView from webViews.user.grouplist import grouplistView, groupqueryView, groupdetailView, groupmodifyView from functools import wraps from webViews.dockletrequest import dockletRequest from webViews.cluster import * from webViews.admin import * from webViews.monitor import * from webViews.beansapplication import * from webViews.cloud import * from webViews.reportbug import * from webViews.authenticate.auth import login_required, administration_required,activated_required from webViews.authenticate.register import registerView from webViews.authenticate.login import loginView, logoutView from webViews.batch import * import webViews.dockletrequest from 
webViews import cookie_tool import traceback from werkzeug.utils import secure_filename external_login = env.getenv('EXTERNAL_LOGIN') #default config external_login_url = '/external_auth/' external_login_callback_url = '/external_auth_callback/' if (external_login == 'True'): sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(this_folder,"../src", "plugin")))) import external_generate from webViews.authenticate.login import external_loginView, external_login_callbackView external_login_url = external_generate.external_login_url external_login_callback_url = external_generate.external_login_callback_url app = Flask(__name__) CsrfProtect(app) @app.route("/", methods=['GET']) def home(): return render_template('home.html') @app.route("/login/", methods=['GET', 'POST']) def login(): loginView.open_registry = os.environ["OPEN_REGISTRY"] return loginView.as_view() @app.route(external_login_url, methods=['GET']) def external_login_func(): try: return external_loginView.as_view() except: abort(404) @app.route(external_login_callback_url, methods=['GET']) def external_login_callback(): try: return external_login_callbackView.as_view() except: abort(404) @app.route("/logout/", methods=["GET"]) @login_required def logout(): return logoutView.as_view() @app.route("/register/", methods=['GET', 'POST']) #@administration_required #now forbidden,only used by SEI & PKU Staffs and students. 
#can be used by admin for testing def register(): return registerView.as_view() @app.route("/activate/", methods=['GET', 'POST']) @login_required def activate(): return userActivateView.as_view() @app.route("/dashboard/", methods=['GET']) @login_required def dashboard(): return dashboardView.as_view() @app.route("/document/", methods=['GET']) def redirect_dochome(): return redirect("https://unias.github.io/docklet/userguide/") @app.route("/config/", methods=['GET']) @login_required def config_view(): return configView.as_view() @app.route("/bug/report/", methods=['POST']) @login_required def reportBug(): reportBugView.bugmessage = request.form['bugmessage'] return reportBugView.as_view() @app.route("/admin_batch_list/", methods=['GET']) @login_required def batch_admin_job(): return batchAdminListView().as_view() @app.route("/batch_jobs/", methods=['GET']) @login_required def batch_job(): return batchJobListView().as_view() @app.route("/batch_job/create/", methods=['GET']) @login_required def create_batch_job(): return createBatchJobView().as_view() @app.route("/batch_job//add/", methods=['POST']) @login_required def add_batch_job(masterip): addBatchJobView.masterip = masterip addBatchJobView.job_data = request.form return addBatchJobView().as_view() @app.route("/batch_job//stop//", methods=['GET']) @login_required def stop_batch_job(masterip,jobid): stopBatchJobView.masterip = masterip stopBatchJobView.jobid = jobid return stopBatchJobView().as_view() @app.route("/admin_batch_job//stop//", methods=['GET']) @login_required def admin_stop_batch_job(masterip,jobid): adminStopBatchJobView.masterip = masterip adminStopBatchJobView.jobid = jobid return adminStopBatchJobView().as_view() @app.route("/batch_job//info//", methods=['GET']) @login_required def info_batch_job(masterip,jobid): infoBatchJobView.masterip = masterip infoBatchJobView.jobid = jobid return infoBatchJobView().as_view() @app.route("/batch_job/output//////", methods=['GET']) @login_required def 
output_batch_job(masterip, jobid, taskid, vnodeid, issue): outputBatchJobView.masterip = masterip outputBatchJobView.jobid = jobid outputBatchJobView.taskid = taskid outputBatchJobView.vnodeid = vnodeid outputBatchJobView.issue = issue return outputBatchJobView().as_view() @app.route("/batch/job/output//////", methods=['POST']) @login_required def output_batch_job_request(masterip, jobid, taskid, vnodeid, issue): data = { 'jobid':jobid, 'taskid':taskid, 'vnodeid':vnodeid, 'issue':issue } result = dockletRequest.post("/batch/job/output/",data,masterip) return json.dumps(result) @app.route("/workspace/create/", methods=['GET']) #@activated_required def addCluster(): return addClusterView.as_view() @app.route("/workspace//list/", methods=['GET']) @login_required def listCluster(masterip): listClusterView.masterip = masterip return listClusterView.as_view() @app.route("/workspace//add/", methods=['POST']) @login_required def createCluster(masterip): createClusterView.clustername = request.form["clusterName"] createClusterView.image = request.form["image"] createClusterView.masterip = masterip return createClusterView.as_view() @app.route("/workspace//scaleout//", methods=['POST']) @login_required def scaleout(clustername,masterip): scaleoutView.image = request.form["image"] scaleoutView.masterip = masterip scaleoutView.clustername = clustername return scaleoutView.as_view() @app.route("/workspace//scalein///", methods=['GET']) @login_required def scalein(clustername,containername,masterip): scaleinView.clustername = clustername scaleinView.containername = containername scaleinView.masterip = masterip return scaleinView.as_view() @app.route("/workspace//start//", methods=['GET']) @login_required def startClustet(clustername,masterip): startClusterView.clustername = clustername startClusterView.masterip = masterip return startClusterView.as_view() @app.route("/workspace//stop//", methods=['GET']) @login_required def stopClustet(clustername,masterip): 
stopClusterView.clustername = clustername stopClusterView.masterip = masterip return stopClusterView.as_view() @app.route("/workspace//delete//", methods=['GET']) @login_required def deleteClustet(clustername,masterip): deleteClusterView.clustername = clustername deleteClusterView.masterip = masterip return deleteClusterView.as_view() @app.route("/workspace//detail//", methods=['GET']) @login_required def detailCluster(clustername,masterip): detailClusterView.clustername = clustername detailClusterView.masterip = masterip return detailClusterView.as_view() @app.route("/workspace//flush///", methods=['GET']) @login_required def flushCluster(clustername,containername): flushClusterView.clustername = clustername flushClusterView.containername = containername return flushClusterView.as_view() @app.route("/workspace//save///", methods=['POST']) @login_required def saveImage(clustername,containername,masterip): saveImageView.clustername = clustername saveImageView.containername = containername saveImageView.masterip = masterip saveImageView.isforce = "false" saveImageView.imagename = request.form['ImageName'] saveImageView.description = request.form['description'] return saveImageView.as_view() @app.route("/workspace//save///force/", methods=['POST']) @login_required def saveImage_force(clustername,containername,masterip): saveImageView.clustername = clustername saveImageView.containername = containername saveImageView.masterip = masterip saveImageView.isforce = "true" saveImageView.imagename = request.form['ImageName'] saveImageView.description = request.form['description'] return saveImageView.as_view() '''@app.route("/addproxy///", methods=['POST']) @login_required def addproxy(clustername,masterip): addproxyView.clustername = clustername addproxyView.masterip = masterip addproxyView.ip = request.form['proxy_ip'] addproxyView.port = request.form['proxy_port'] return addproxyView.as_view()''' '''@app.route("/deleteproxy///", methods=['GET']) @login_required def 
deleteproxy(clustername,masterip): deleteproxyView.clustername = clustername deleteproxyView.masterip = masterip return deleteproxyView.as_view()''' @app.route("/port_mapping/add//", methods=['POST']) @login_required def addPortMapping(masterip): addPortMappingView.masterip = masterip return addPortMappingView.as_view() @app.route("/port_mapping/delete/////", methods=['GET']) @login_required def delPortMapping(masterip,clustername,node_name,node_port): delPortMappingView.masterip = masterip delPortMappingView.clustername = clustername delPortMappingView.node_name = node_name delPortMappingView.node_port = node_port return delPortMappingView.as_view() @app.route("/getmasterdesc//", methods=['POST']) @login_required def getmasterdesc(mastername): return env.getenv(mastername+"_desc")[1:-1] @app.route("/masterdesc//", methods=['GET']) @login_required def masterdesc(mastername): descriptionMasterView.desc=env.getenv(mastername+"_desc")[1:-1] return descriptionMasterView.as_view() @app.route("/image//list/", methods=['POST']) @login_required def image_list(masterip): data = { "user": session['username'] } # path = request.path[:request.path.rfind("/")] # path = path[:path.rfind("/")+1] result = dockletRequest.post("/image/list/", data, masterip) logger.debug("image" + str(type(result))) return json.dumps(result) @app.route("/image//description//", methods=['GET']) @login_required def descriptionImage(image,masterip): descriptionImageView.image = image descriptionImageView.masterip = masterip return descriptionImageView.as_view() @app.route("/image//share//", methods=['GET']) @login_required def shareImage(image,masterip): shareImageView.image = image shareImageView.masterip = masterip return shareImageView.as_view() @app.route("/image//unshare//", methods=['GET']) @login_required def unshareImage(image,masterip): unshareImageView.image = image unshareImageView.masterip = masterip return unshareImageView.as_view() @app.route("/image//delete//", methods=['GET']) 
@login_required def deleteImage(image,masterip): deleteImageView.image = image deleteImageView.masterip = masterip return deleteImageView.as_view() @app.route("/image//copy//", methods=['POST']) @login_required def copyImage(image,masterip): copyImageView.image = image copyImageView.masterip = masterip copyImageView.target = request.form['target'] return copyImageView.as_view() @app.route("/image//updatebase//", methods=['GET']) @login_required def updatebaseImage(image,masterip): updatebaseImageView.image = image updatebaseImageView.masterip = masterip return updatebaseImageView.as_view() @app.route("/hosts/", methods=['GET']) @administration_required def hosts(): return hostsView.as_view() @app.route("/hosts//migrate//", methods=['POST']) @administration_required def hostMigrate(hostip, masterip): hostMigrateView.hostip = hostip hostMigrateView.masterip = masterip hostMigrateView.target = request.form.getlist('target') return hostMigrateView.as_view() @app.route("/hosts///", methods=['GET']) @administration_required def hostsRealtime(com_ip,masterip): hostsRealtimeView.com_ip = com_ip hostsRealtimeView.masterip = masterip return hostsRealtimeView.as_view() @app.route("/hosts///containers/", methods=['GET']) @administration_required def hostsConAll(com_ip,masterip): hostsConAllView.com_ip = com_ip hostsConAllView.masterip = masterip return hostsConAllView.as_view() @app.route("/hosts///containers//", methods=['GET']) @administration_required def hostsConRealtime(com_ip,node_name,masterip): statusRealtimeView.masterip = masterip statusRealtimeView.node_name = node_name return statusRealtimeView.as_view() @app.route("/vclusters/", methods=['GET']) @login_required def status(): return statusView.as_view() @app.route("/vclusters////", methods=['GET']) @login_required def statusRealtime(vcluster_name,node_name,masterip): statusRealtimeView.masterip = masterip statusRealtimeView.node_name = node_name return statusRealtimeView.as_view() @app.route("/history/", 
methods=['GET']) #@login_required def history(): return historyView.as_view() @app.route("/history///", methods=['GET']) @login_required def historyVNode(vnode_name,masterip): historyVNodeView.masterip = masterip historyVNodeView.vnode_name = vnode_name return historyVNodeView.as_view() @app.route("/monitor//hosts///", methods=['POST']) @app.route("/monitor//vnodes///", methods=['POST']) @login_required def monitor_request(comid,infotype,masterip): data = { "user": session['username'] } path = request.path[request.path.find("/")+1:] path = path[path.find("/")+1:] path = path[path.find("/")+1:] logger.debug(path + "_____" + masterip) result = dockletRequest.post("/monitor/"+path, data, masterip) logger.debug("monitor" + str(type(result))) return json.dumps(result) @app.route("/monitor//user//", methods=['POST']) @login_required def monitor_user_request(issue,masterip): data = { "user": session['username'] } path = "/monitor/user/" + str(issue) + "/" logger.debug(path + "_____" + masterip) result = dockletRequest.post(path, data, masterip) logger.debug("monitor" + str(type(result))) return json.dumps(result) @app.route("/beans/application/", methods=['GET']) @login_required def beansapplication(): return beansapplicationView.as_view() @app.route("/beans/apply/", methods=['POST']) @login_required def beansapply(): return beansapplyView.as_view() @app.route("/beans/admin////", methods=['GET']) @login_required @administration_required def beansadmin(username,msgid,cmd): beansadminView.msgid = msgid beansadminView.username = username if cmd == "agree" or cmd == "reject": beansadminView.cmd = cmd return beansadminView.as_view() else: return redirect("/user/list/") '''@app.route("/monitor/User/", methods=['GET']) @administration_required def monitorUserAll(): return monitorUserAllView.as_view() ''' @app.route("/logs/", methods=['GET', 'POST']) @administration_required def logs(): return logsView.as_view() @app.route("/logs//", methods=['GET']) @administration_required def 
logs_get(filename): data = { "filename": filename } result = dockletRequest.post('/logs/get/', data).get('result', '') response = make_response(result) response.headers["content-type"] = "text/plain" return response @app.route("/user/list/", methods=['GET', 'POST']) @administration_required def userlist(): return userlistView.as_view() @app.route("/user/lock/release//", methods=['GET', 'POST']) @administration_required def userLockRelease(ulockname): data = { "ulockname": ulockname } result = dockletRequest.post_to_all("/admin/ulock/release/", data) #logger.debug(result) return json.dumps(result) @app.route("/group/list/", methods=['POST']) @administration_required def grouplist(): return grouplistView.as_view() @app.route("/group/detail/", methods=['POST']) @administration_required def groupdetail(): return groupdetailView.as_view() @app.route("/group/query/", methods=['POST']) @administration_required def groupquery(): return groupqueryView.as_view() @app.route("/group/modify//", methods=['POST']) @administration_required def groupmodify(groupname): return groupmodifyView.as_view() @app.route("/user/data/", methods=['GET', 'POST']) @administration_required def userdata(): return userdataView.as_view() @app.route("/user/add/", methods=['POST']) @administration_required def useradd(): return useraddView.as_view() @app.route("/user/modify/", methods=['POST']) @administration_required def usermodify(): return usermodifyView.as_view() @app.route("/user/change/", methods=['POST']) @administration_required def userchange(): return usermodifyView.as_view() @app.route("/quota/add/", methods=['POST']) @administration_required def quotaadd(): return quotaaddView.as_view() @app.route("/quota/chdefault/", methods=['POST']) @administration_required def chdefault(): return chdefaultView.as_view() @app.route("/quota/chlxcsetting/", methods=['POST']) @administration_required def chlxcsetting(): return chlxcsettingView.as_view() @app.route("/group/add/", methods=['POST']) 
@administration_required
def groupadd():
    return groupaddView.as_view()

# NOTE(review): URL converters (e.g. <groupname>) below were stripped by the
# extraction and are reconstructed from handler signatures -- confirm upstream.
@app.route("/group/delete/<groupname>/", methods=['POST', 'GET'])
@administration_required
def groupdel(groupname):
    groupdelView.groupname = groupname
    return groupdelView.as_view()

@app.route("/user/info/", methods=['GET', 'POST'])
@login_required
def userinfo():
    return userinfoView.as_view()

@app.route("/user/selfQuery/", methods=['GET', 'POST'])
@login_required
def userselfQuery():
    result = dockletRequest.post('/user/selfQuery/')
    return json.dumps(result['data'])

@app.route("/user/query/", methods=['GET', 'POST'])
@administration_required
def userquery():
    return userqueryView.as_view()

@app.route("/cloud/", methods=['GET', 'POST'])
@administration_required
def cloud():
    return cloudView.as_view()

@app.route("/cloud/<masterip>/setting/modify/", methods = ['POST'])
@administration_required
def cloud_setting_modify(masterip):
    cloudSettingModifyView.masterip = masterip
    return cloudSettingModifyView.as_view()

@app.route("/cloud/<masterip>/node/add/", methods = ['POST', 'GET'])
@administration_required
def cloud_node_add(masterip):
    cloudNodeAddView.masterip = masterip
    return cloudNodeAddView.as_view()

@app.route("/notification/", methods=['GET'])
@administration_required
def notification_list():
    return NotificationView.as_view()

@app.route("/notification/create/", methods=['GET', 'POST'])
@administration_required
def create_notification():
    return CreateNotificationView.as_view()

@app.route("/notification/modify/", methods=['POST'])
@administration_required
def modify_notification():
    return ModifyNotificationView.as_view()

@app.route("/notification/delete/", methods=['POST'])
@administration_required
def delete_notification():
    return DeleteNotificationView.as_view()

@app.route("/notification/query_self/", methods=['POST'])
@login_required
def query_self_notifications():
    return QuerySelfNotificationsView.as_view()

@app.route("/notification/detail/<notify_id>/", methods=['GET'])
@login_required
def query_notification_detail(notify_id):
    return QueryNotificationView.get_by_id(notify_id)

@app.route("/system/modify/", methods=['POST'])
@administration_required
def systemmodify():
    return systemmodifyView.as_view()

@app.route("/system/clear_history/", methods=['POST'])
@administration_required
def systemclearhistory():
    return systemclearView.as_view()

@app.route("/system/add/", methods=['POST'])
@administration_required
def systemadd():
    return systemaddView.as_view()

@app.route("/system/delete/", methods=['POST'])
@administration_required
def systemdelete():
    return systemdeleteView.as_view()

@app.route("/system/resetall/", methods=['POST'])
@administration_required
def systemresetall():
    return systemresetallView.as_view()

@app.route("/settings/", methods=['GET', 'POST'])
@administration_required
def adminpage():
    return adminView.as_view()

@app.route("/settings/update/", methods=['POST'])
@administration_required
def updatesettings():
    return updatesettingsView.as_view()

@app.route('/index/', methods=['GET'])
def jupyter_control():
    return redirect('/dashboard/')

# for download basefs.tar.bz
# remove, not the function of docklet
# should download it from a http server
#@app.route('/download/basefs', methods=['GET'])
#def download():
    #fsdir = env.getenv("FS_PREFIX")
    #return send_from_directory(fsdir+'/local', 'basefs.tar.bz', as_attachment=True)

# jupyter auth APIs
@app.route('/jupyter/', methods=['GET'])
def jupyter_prefix():
    path = request.args.get('next')
    if path == None:
        return redirect('/login/')
    return redirect('/login/'+'?next='+path)

@app.route('/jupyter/home/', methods=['GET'])
def jupyter_home():
    return redirect('/dashboard/')

@app.route('/jupyter/login/', methods=['GET', 'POST'])
def jupyter_login():
    return redirect('/login/')

@app.route('/jupyter/logout/', methods=['GET'])
def jupyter_logout():
    return redirect('/logout/')

@app.route('/jupyter/authorizations/cookie/<cookie_name>/<cookie_content>/', methods=['GET'])
def jupyter_auth(cookie_name, cookie_content):
    # Jupyter asks us to validate its auth cookie; 404 on failure is what
    # the jupyter single-user server expects for a rejected cookie.
    username = cookie_tool.parse_cookie(cookie_content, app.secret_key)
    if username == None:
        resp = make_response('cookie auth failed')
        resp.status_code = 404
        return resp
    return json.dumps({'name': username})

@app.errorhandler(401)
def not_authorized(error):
    if "username" in session:
        if "401" in session:
            reason = session['401']
            session.pop('401', None)
            if (reason == 'Token Expired'):
                return redirect('/logout/')
        return render_template('error/401.html', mysession = session)
    else:
        return redirect('/login/')

@app.errorhandler(500)
def internal_server_error(error):
    logger.error(error)
    logger.error(traceback.format_exc())
    if "username" in session:
        if "500" in session and "500_title" in session:
            reason = session['500']
            title = session['500_title']
            session.pop('500', None)
            session.pop('500_title', None)
        else:
            reason = '''The server encountered something unexpected that didn't allow it to complete the request. We apologize.You can go back to dashboard or log out'''
            title = 'Internal Server Error'
        return render_template('error/500.html', mysession = session, reason = reason, title = title)
    else:
        return redirect('/login/')

if __name__ == '__main__':
    '''
    to generate a secret_key

    from base64 import b64encode
    from os import urandom

    secret_key = urandom(24)
    secret_key = b64encode(secret_key).decode('utf-8')
    '''
    logger.info('Start Flask...:')
    # Reuse a persisted secret key so sessions survive restarts; generate and
    # persist a fresh one on first run (or if the file is unreadable).
    try:
        secret_key_file = open(env.getenv('FS_PREFIX') + '/local/web_secret_key.txt')
        app.secret_key = secret_key_file.read()
        secret_key_file.close()
    except:
        from base64 import b64encode
        from os import urandom
        secret_key = urandom(24)
        secret_key = b64encode(secret_key).decode('utf-8')
        app.secret_key = secret_key
        secret_key_file = open(env.getenv('FS_PREFIX') + '/local/web_secret_key.txt', 'w')
        secret_key_file.close = secret_key_file.close
        secret_key_file.write(secret_key)
        secret_key_file.close()
    try:
        open_registryfile = open(env.getenv('FS_PREFIX') + '/local/settings.conf')
        settings = json.loads(open_registryfile.read())
        open_registryfile.close()
        os.environ['OPEN_REGISTRY'] = settings.get('OPEN_REGISTRY',"False")
    except:
        os.environ['OPEN_REGISTRY'] = "False"
os.environ['APP_KEY'] = app.secret_key runcmd = sys.argv[0] app.runpath = runcmd.rsplit('/', 1)[0] webip = "0.0.0.0" webport = env.getenv("WEB_PORT") webViews.dockletrequest.endpoint = 'http://%s:%d' % (env.getenv('MASTER_IP'), env.getenv('MASTER_PORT')) try: opts, args = getopt.getopt(sys.argv[1:], "i:p:", ["ip=", "port="]) except getopt.GetoptError: print ("%s -i ip -p port" % sys.argv[0]) sys.exit(2) for opt, arg in opts: if opt in ("-i", "--ip"): webip = arg elif opt in ("-p", "--port"): webport = int(arg) # Set True when using ssl/https #app.config['SESSION_COOKIE_SECURE'] = True app.run(host = webip, port = webport, threaded=True) ================================================ FILE: web/webViews/admin.py ================================================ from flask import session, render_template, redirect, request from webViews.view import normalView from webViews.dockletrequest import dockletRequest from webViews.dashboard import * import time, re, json, os class adminView(normalView): template_path = "settings.html" @classmethod def get(self): result = dockletRequest.post('/user/groupList/') groups = result["groups"] quotas = result["quotas"] defaultgroup = result["default"] parms = dockletRequest.post('/system/parmList/') rootimage = dockletRequest.post('/image/list/').get('images') lxcsetting = dockletRequest.post('/user/lxcsettingList/')['data'] settings = dockletRequest.post('/settings/list/')['result'] return self.render(self.template_path, groups = groups, quotas = quotas, defaultgroup = defaultgroup, parms = parms, lxcsetting = lxcsetting, root_image = rootimage['private'], settings=settings) class updatesettingsView(normalView): @classmethod def post(self): result = dockletRequest.post("/settings/update/", request.form) os.environ['OPEN_REGISTRY'] = request.form.get('OPEN_REGISTRY') return redirect('/settings/') class groupaddView(normalView): @classmethod def post(self): dockletRequest.post('/user/groupadd/', request.form) return 
redirect('/settings/') class systemmodifyView(normalView): @classmethod def post(self): dockletRequest.post('/system/modify/', request.form) return redirect('/settings/') class systemclearView(normalView): @classmethod def post(self): dockletRequest.post('/system/clear_history/', request.form) return redirect('/settings/') class systemaddView(normalView): @classmethod def post(self): dockletRequest.post('/system/add/', request.form) return redirect('/settings/') class systemdeleteView(normalView): @classmethod def post(self): dockletRequest.post('/system/delete/', request.form) return redirect('/settings/') class systemresetallView(normalView): @classmethod def post(self): dockletRequest.post('/system/reset_all/', request.form) return redirect('/settings/') class quotaaddView(normalView): @classmethod def post(self): dockletRequest.post('/user/quotaadd/', request.form) return redirect('/settings/') class chdefaultView(normalView): @classmethod def post(self): dockletRequest.post('/user/chdefault/', request.form) return redirect('/settings/') class chlxcsettingView(normalView): @classmethod def post(self): dockletRequest.post('/user/chlxcsetting/', request.form) return redirect('/settings/') class groupdelView(normalView): @classmethod def post(self): data = { "name" : self.groupname, } dockletRequest.post('/user/groupdel/', data) return redirect('/settings/') @classmethod def get(self): return self.post() class chparmView(normalView): @classmethod def post(self): dockletRequest.post('/system/chparm/', request.form) class historydelView(normalView): @classmethod def post(self): dockletRequest.post('/system/historydel/', request.form) return redirect('/settings/') class updatebaseImageView(normalView): @classmethod def get(self): data = { "image": self.image } dockletRequest.post('/image/updatebase/', data) return redirect("/settings/") class hostMigrateView(normalView): @classmethod def post(self): data = { "src_host": self.hostip, "dst_host_list": self.target } 
dockletRequest.post("/host/migrate/", data, self.masterip) return redirect("/hosts/") ================================================ FILE: web/webViews/authenticate/auth.py ================================================ from flask import session, request, abort, redirect from functools import wraps def login_required(func): @wraps(func) def wrapper(*args, **kwargs): if request.method == 'POST' : if not is_authenticated(): abort(401) else: return func(*args, **kwargs) else: if not is_authenticated(): return redirect("/login/" + "?next=" + request.path) else: return func(*args, **kwargs) return wrapper def administration_required(func): @wraps(func) def wrapper(*args, **kwargs): if not is_admin(): abort(401) else: return func(*args, **kwargs) return wrapper def activated_required(func): @wraps(func) def wrapper(*args, **kwargs): if not is_activated(): abort(401) else: return func(*args, **kwargs) return wrapper def is_authenticated(): if "username" in session: return True else: return False def is_admin(): if not "username" in session: return False if not (session['usergroup'] == 'root' or session['usergroup'] == 'admin'): return False return True def is_activated(): if not "username" in session: return False if not (session['status']=='normal'): return False return True ================================================ FILE: web/webViews/authenticate/login.py ================================================ from webViews.view import normalView from webViews.authenticate.auth import is_authenticated from webViews.dockletrequest import dockletRequest from flask import redirect, request, render_template, session, make_response, abort from webViews import cookie_tool import hashlib #from suds.client import Client import os, sys, inspect this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])) src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../..", "src"))) if src_folder not in sys.path: 
sys.path.insert(0, src_folder) from utils import env if (env.getenv('EXTERNAL_LOGIN') == 'True'): sys.path.insert(0, os.path.realpath(os.path.abspath(os.path.join(this_folder,"../../../src", "plugin")))) import external_generate def refreshInfo(): data = {} result = dockletRequest.post('/user/selfQuery/', data) ok = result and result.get('success', None) if (ok and ok == "true"): session['username'] = result['data']['username'] session['nickname'] = result['data']['nickname'] session['description'] = result['data']['description'] session['avatar'] = '/static/avatar/'+ result['data']['avatar'] session['usergroup'] = result['data']['group'] session['status'] = result['data']['status'] else: abort(404) class loginView(normalView): template_path = "login.html" @classmethod def get(self): if is_authenticated(): refreshInfo() return redirect(request.args.get('next',None) or '/dashboard/') if (env.getenv('EXTERNAL_LOGIN') == 'True'): url = external_generate.external_login_url link = external_generate.external_login_link else: link = '' url = '' return render_template(self.template_path, loginMsg="", link = link, url = url, open_registry=self.open_registry) @classmethod def post(self): if (request.form['username']): data = {"user": request.form['username'], "key": request.form['password'], 'ip': request.remote_addr} result = dockletRequest.unauthorizedpost('/login/', data) ok = result and result.get('success', None) if (ok and (ok == "true")): # set cookie:docklet-jupyter-cookie for jupyter notebook resp = make_response(redirect(request.args.get('next',None) or '/dashboard/')) app_key = os.environ['APP_KEY'] resp.set_cookie('docklet-jupyter-cookie', cookie_tool.generate_cookie(request.form['username'], app_key)) # set session for docklet session['username'] = request.form['username'] session['nickname'] = result['data']['nickname'] session['description'] = result['data']['description'] session['avatar'] = '/static/avatar/'+ result['data']['avatar'] session['usergroup'] = 
result['data']['group'] session['status'] = result['data']['status'] session['token'] = result['data']['token'] return resp else: if (env.getenv('EXTERNAL_LOGIN') == 'True'): url = external_generate.external_login_url link = external_generate.external_login_link else: link = '' url = '' loginMsg = result.get('message', '') return render_template(self.template_path, loginMsg=loginMsg, link = link, url = url, open_registry=self.open_registry) else: return redirect('/login/') class logoutView(normalView): @classmethod def get(self): resp = make_response(redirect('/login/')) session.pop('username', None) session.pop('nickname', None) session.pop('description', None) session.pop('avatar', None) session.pop('status', None) session.pop('usergroup', None) session.pop('token', None) resp.set_cookie('docklet-jupyter-cookie', '', expires=0) return resp class external_login_callbackView(normalView): @classmethod def get(self): form = external_generate.external_auth_generate_request() result = dockletRequest.unauthorizedpost('/external_login/', form) ok = result and result.get('success', None) if (ok and (ok == "true")): # set cookie:docklet-jupyter-cookie for jupyter notebook resp = make_response(redirect(request.args.get('next',None) or '/dashboard/')) app_key = os.environ['APP_KEY'] resp.set_cookie('docklet-jupyter-cookie', cookie_tool.generate_cookie(result['data']['username'], app_key)) # set session for docklet session['username'] = result['data']['username'] session['nickname'] = result['data']['nickname'] session['description'] = result['data']['description'] session['avatar'] = '/static/avatar/'+ result['data']['avatar'] session['usergroup'] = result['data']['group'] session['status'] = result['data']['status'] session['token'] = result['data']['token'] return resp else: return redirect('/login/') @classmethod def post(self): form = external_generate.external_auth_generate_request() result = dockletRequest.unauthorizedpost('/external_login/', form) ok = result and 
result.get('success', None) if (ok and (ok == "true")): # set cookie:docklet-jupyter-cookie for jupyter notebook resp = make_response(redirect(request.args.get('next',None) or '/dashboard/')) app_key = os.environ['APP_KEY'] resp.set_cookie('docklet-jupyter-cookie', cookie_tool.generate_cookie(result['data']['username'], app_key)) # set session for docklet session['username'] = result['data']['username'] session['nickname'] = result['data']['nickname'] session['description'] = result['data']['description'] session['avatar'] = '/static/avatar/'+ result['data']['avatar'] session['usergroup'] = result['data']['group'] session['status'] = result['data']['status'] session['token'] = result['data']['token'] return resp else: return redirect('/login/') class external_loginView(normalView): if (env.getenv('EXTERNAL_LOGIN') == 'True'): template_path = external_generate.html_path @classmethod def post(self): return render_template(self.template_path) @classmethod def get(self): return self.post() ================================================ FILE: web/webViews/authenticate/register.py ================================================ from webViews.view import normalView from webViews.dockletrequest import dockletRequest from flask import redirect, request, abort, render_template class registerView(normalView): template_path = 'register.html' @classmethod def post(self): form = dict(request.form) if (request.form.get('username') == None or request.form.get('password') == None or request.form.get('password') != request.form.get('password2') or request.form.get('email') == None or request.form.get('description') == None): abort(500) result = dockletRequest.unauthorizedpost('/register/', form) return redirect("/login/") @classmethod def get(self): return render_template(self.template_path) ================================================ FILE: web/webViews/batch.py ================================================ from flask import session, redirect, request from webViews.view 
import normalView from webViews.log import logger from webViews.checkname import checkname from webViews.dockletrequest import dockletRequest from utils import env import json class batchAdminListView(normalView): template_path = "batch/batch_admin_list.html" @classmethod def get(self): masterips = dockletRequest.post_to_all() job_list = {} for ipname in masterips: ip = ipname.split("@")[0] result = dockletRequest.post("/batch/job/listall/",{},ip) job_list[ip] = result.get("data") logger.debug("job_list[%s]: %s" % (ip,job_list[ip])) if True: return self.render(self.template_path, masterips=masterips, job_list=job_list) else: return self.error() class batchJobListView(normalView): template_path = "batch/batch_list.html" @classmethod def get(self): masterips = dockletRequest.post_to_all() job_list = {} for ipname in masterips: ip = ipname.split("@")[0] result = dockletRequest.post("/batch/job/list/",{},ip) job_list[ip] = result.get("data") logger.debug("job_list[%s]: %s" % (ip,job_list[ip])) if True: return self.render(self.template_path, masterips=masterips, job_list=job_list) else: return self.error() class createBatchJobView(normalView): template_path = "batch/batch_create.html" @classmethod def get(self): masterips = dockletRequest.post_to_all() images = {} for master in masterips: images[master.split("@")[0]] = dockletRequest.post("/image/list/",{},master.split("@")[0]).get("images") logger.info(images) data = { "user": session['username'], } allresult = dockletRequest.post_to_all('/monitor/listphynodes/', data) allmachines = {} for master in allresult: allmachines[master.split("@")[0]] = [] iplist = allresult[master].get('monitor').get('allnodes') for ip in iplist: result = dockletRequest.post('/monitor/hosts/%s/gpuinfo/'%(ip), data, master.split("@")[0]) gpuinfo = result.get('monitor').get('gpuinfo') allmachines[master.split("@")[0]].append(gpuinfo) pending_gpu_tasks = {} for master in masterips: pending_gpu_tasks[master.split("@")[0]] = 
dockletRequest.post("/monitor/pending_gpu_tasks/",{},master.split("@")[0]).get("monitor").get("pending_tasks") return self.render(self.template_path, user=session['username'], masterips=masterips, images=images, allmachines=allmachines, pending_gpu_tasks=pending_gpu_tasks) class infoBatchJobView(normalView): template_path = "batch/batch_info.html" error_path = "error.html" masterip = "" jobid = "" @classmethod def get(self): data = { 'jobid':self.jobid } result = dockletRequest.post("/batch/job/info/",data,self.masterip) data = result.get("data") logger.info(str(data)) #logger.debug("job_list: %s" % job_list) if result.get('success',"") == "true": return self.render(self.template_path, masterip=self.masterip, jobinfo=data) else: return self.render(self.error_path, message = result.get('message')) class addBatchJobView(normalView): template_path = "batch/batch_list.html" error_path = "error.html" @classmethod def post(self): masterip = self.masterip result = dockletRequest.post("/batch/job/add/", self.job_data, masterip) if result.get('success', None) == "true": return redirect('/batch_jobs/') else: return self.render(self.error_path, message = result.get('message')) class stopBatchJobView(normalView): template_path = "batch/batch_list.html" error_path = "error.html" @classmethod def get(self): masterip = self.masterip data = {'jobid':self.jobid} result = dockletRequest.post("/batch/job/stop/", data, masterip) if result.get('success', None) == "true": return redirect('/batch_jobs/') else: return self.render(self.error_path, message = result.get('message')) class adminStopBatchJobView(normalView): template_path = "batch/batch_admin_list.html" error_path = "error.html" @classmethod def get(self): masterip = self.masterip data = {'jobid':self.jobid} result = dockletRequest.post("/batch/job/stop/", data, masterip) if result.get('success', None) == "true": return redirect('/admin_batch_list/') else: return self.render(self.error_path, message = result.get('message')) 
class outputBatchJobView(normalView):
    """Fetch and display stdout/stderr (`issue`) of one task vnode."""
    template_path = "batch/batch_output.html"
    masterip = ""
    jobid = ""
    taskid = ""
    vnodeid = ""
    issue = ""

    @classmethod
    def get(self):
        data = {
            'jobid': self.jobid,
            'taskid': self.taskid,
            'vnodeid': self.vnodeid,
            'issue': self.issue
        }
        result = dockletRequest.post("/batch/job/output/", data, self.masterip)
        output = result.get("data")
        if result.get('success', "") == "true":
            return self.render(self.template_path, masterip=self.masterip,
                               jobid=self.jobid, taskid=self.taskid,
                               vnodeid=self.vnodeid, issue=self.issue, output=output)
        return self.error()


# ================================================
# FILE: web/webViews/beansapplication.py
# ================================================
from flask import session, render_template, request, redirect
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest


class beansapplicationView(normalView):
    """List the current user's beans applications."""
    template_path = "beansapplication.html"

    @classmethod
    def get(self):
        result = dockletRequest.post('/beans/applymsgs/').get('applymsgs')
        return self.render(self.template_path, applications=result)

    @classmethod
    def post(self):
        return self.get()


class beansapplyView(normalView):
    """Submit a beans application from the request form."""
    template_path = "error.html"

    @classmethod
    def post(self):
        data = {"number": request.form["number"], "reason": request.form["reason"]}
        result = dockletRequest.post('/beans/apply/', data)
        if result.get("success") == "true":
            return redirect("/beans/application/")
        return self.render(self.template_path, message=result.get("message"))

    @classmethod
    def get(self):
        return self.post()


class beansadminView(normalView):
    """Admin approve/reject (`cmd`) of a beans application message."""
    username = ""
    msgid = ""
    cmd = ""
    template_path = "error.html"

    @classmethod
    def get(self):
        data = {"username": self.username, "msgid": self.msgid}
        result = dockletRequest.post('/beans/admin/' + self.cmd + "/", data)
        if result.get("success") == "true":
            return redirect("/user/list/")
        return self.render(self.template_path, message=result.get("message"))


# ================================================
# FILE: web/webViews/checkname.py
# ================================================
import re
from flask import abort, session

pattern = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
error_msg = '''
Your name may cause errors, Please use names starting with a-z,A-Z or _ and contains only elements in {a-z, A-Z, _, 0-9}
'''
error_title = 'Input Error'


def checkname(str):
    """Abort with 500 unless *str* is a full identifier-like name."""
    try:
        match = pattern.match(str)
        if (match == None):
            session['500'] = error_msg
            session['500_title'] = error_title
            abort(500)
        # a prefix match is not enough — the whole string must match
        if (match.group() != str):
            session['500'] = error_msg
            session['500_title'] = error_title
            abort(500)
        return True
    except:
        session['500'] = error_msg
        session['500_title'] = error_title
        abort(500)


# ================================================
# FILE: web/webViews/cloud.py
# ================================================
from flask import session, render_template, redirect, request
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
import time, re, json


class cloudView(normalView):
    """Show cloud settings gathered from all masters."""
    template_path = "cloud.html"

    @classmethod
    def post(self):
        settings = dockletRequest.post_to_all('/cloud/setting/get/')
        return self.render(self.template_path, settings=settings)

    @classmethod
    def get(self):
        return self.post()


class cloudSettingModifyView(normalView):
    @classmethod
    def post(self):
        dockletRequest.post('/cloud/setting/modify/', request.form, self.masterip)
        return redirect('/cloud/')


class cloudNodeAddView(normalView):
    @classmethod
    def post(self):
        data = {}
        dockletRequest.post('/cloud/node/add/', data, self.masterip)
        return redirect('/hosts/')

    @classmethod
    def get(self):
        return self.post()


# ================================================
# FILE: web/webViews/cluster.py
# ================================================
from flask import session, redirect, request
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest
from webViews.dashboard import *
from webViews.checkname import checkname
import time, re


class addClusterView(normalView):
    """Render the cluster-creation form with remaining-quota defaults."""
    template_path = "addCluster.html"

    @classmethod
    def get(self):
        masterips = dockletRequest.post_to_all()
        images = dockletRequest.post("/image/list/", {}, masterips[0].split("@")[0]).get("images")
        desc = dockletRequest.getdesc(masterips[0].split("@")[1])
        result = dockletRequest.post("/user/usageQuery/")
        quota = result.get("quota")
        usage = result.get("usage")
        default = result.get("default")
        restcpu = int(quota['cpu']) - int(usage['cpu'])
        restmemory = int(quota['memory']) - int(usage['memory'])
        restdisk = int(quota['disk']) - int(usage['disk'])
        # offer the configured default, clamped to whatever quota remains
        if restcpu >= int(default['cpu']):
            defaultcpu = default['cpu']
        elif restcpu <= 0:
            defaultcpu = "0"
        else:
            defaultcpu = str(restcpu)
        if restmemory >= int(default['memory']):
            defaultmemory = default['memory']
        elif restmemory <= 0:
            defaultmemory = "0"
        else:
            defaultmemory = str(restmemory)
        if restdisk >= int(default['disk']):
            defaultdisk = default['disk']
        elif restdisk <= 0:
            defaultdisk = "0"
        else:
            defaultdisk = str(restdisk)
        defaultsetting = {
            'cpu': defaultcpu,
            'memory': defaultmemory,
            'disk': defaultdisk
        }
        if (result):
            return self.render(self.template_path, user=session['username'],
                               masterips=masterips, images=images, quota=quota,
                               usage=usage, defaultsetting=defaultsetting, masterdesc=desc)
        else:
            self.error()


class createClusterView(normalView):
    """Create a cluster; `image` encodes name_owner_type, split on '_'."""
    template_path = "dashboard.html"
    error_path = "error.html"

    @classmethod
    def post(self):
        masterip = self.masterip
        index1 = self.image.rindex("_")
        index2 = self.image[:index1].rindex("_")
        checkname(self.clustername)
        data = {
            "clustername": self.clustername,
            'imagename': self.image[:index2],
            'imageowner': self.image[index2 + 1:index1],
            'imagetype': self.image[index1 + 1:],
        }
        result = dockletRequest.post("/cluster/create/", dict(data, **(request.form)), masterip)
        if (result.get('success', None) == "true"):
            return redirect("/dashboard/")
        else:
            return self.render(self.error_path, message=result.get('message'))


class descriptionMasterView(normalView):
    template_path = "description.html"

    @classmethod
    def get(self):
        return self.render(self.template_path, description=self.desc)


class descriptionImageView(normalView):
    template_path = "description.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        index1 = self.image.rindex("_")
        index2 = self.image[:index1].rindex("_")
        data = {
            "imagename": self.image[:index2],
            "imageowner": self.image[index2 + 1:index1],
            "imagetype": self.image[index1 + 1:]
        }
        result = dockletRequest.post("/image/description/", data, masterip)
        if (result):
            description = result.get("message")
            return self.render(self.template_path, description=description)
        else:
            self.error()


class scaleoutView(normalView):
    error_path = "error.html"

    @classmethod
    def post(self):
        masterip = self.masterip
        index1 = self.image.rindex("_")
        index2 = self.image[:index1].rindex("_")
        data = {
            "clustername": self.clustername,
            'imagename': self.image[:index2],
            'imageowner': self.image[index2 + 1:index1],
            'imagetype': self.image[index1 + 1:]
        }
        result = dockletRequest.post("/cluster/scaleout/", dict(data, **(request.form)), masterip)
        if (result.get('success', None) == "true"):
            return redirect("/config/")
        else:
            return self.render(self.error_path, message=result.get('message'))


class scaleinView(normalView):
    error_path = "error.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername,
            "containername": self.containername
        }
        result = dockletRequest.post("/cluster/scalein/", data, masterip)
        if (result.get('success', None) == "true"):
            return redirect("/config/")
        else:
            return self.render(self.error_path, message=result.get('message'))


class listClusterView(normalView):
    template_path = "listCluster.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        result = dockletRequest.post("/cluster/list/", {}, masterip)
        clusters = result.get("clusters")
        if (result):
            return self.render(self.template_path, user=session['username'], clusters=clusters)
        else:
            self.error()


class startClusterView(normalView):
    template_path = "dashboard.html"
    error_path = "error.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/start/", data, masterip)
        if (result.get('success', None) == "true"):
            return redirect("/dashboard/")
        else:
            return self.render(self.error_path, message=result.get('message'))


class stopClusterView(normalView):
    template_path = "dashboard.html"
    error_path = "error.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/stop/", data, masterip)
        if (result.get('success', None) == "true"):
            return redirect("/dashboard/")
        else:
            return self.render(self.error_path, message=result.get('message'))


class flushClusterView(normalView):
    success_path = "opsuccess.html"
    failed_path = "opfailed.html"

    @classmethod
    def get(self):
        data = {
            "clustername": self.clustername,
            "from_lxc": self.containername
        }
        result = dockletRequest.post("/cluster/flush/", data)
        if (result):
            if result.get('success') == "true":
                return self.render(self.success_path, user=session['username'])
            else:
                return self.render(self.failed_path, user=session['username'])
        else:
            self.error()


class deleteClusterView(normalView):
    template_path = "dashboard.html"
    error_path = "error.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/delete/", data, masterip)
        if (result.get('success', None) == "true"):
            return redirect("/dashboard/")
        else:
            return self.render(self.error_path, message=result.get('message'))


class detailClusterView(normalView):
    template_path = "listcontainer.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/cluster/info/", data, masterip)
        if (result):
            message = result.get('message')
            containers = message['containers']
            status = message['status']
            return self.render(self.template_path, containers=containers,
                               user=session['username'], clustername=self.clustername,
                               status=status)
        else:
            self.error()


class saveImageView(normalView):
    """Save a container as an image; re-prompt when the image name exists."""
    template_path = "saveconfirm.html"
    success_path = "opsuccess.html"
    error_path = "error.html"

    @classmethod
    def post(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername,
            "image": self.imagename,
            "containername": self.containername,
            "description": self.description,
            "isforce": self.isforce
        }
        result = dockletRequest.post("/cluster/save/", data, masterip)
        if (result):
            if result.get('success') == 'true':
                return redirect("/config/")
            else:
                if result.get('reason') == "exists":
                    # name clash: show the confirm page so the user can force-save
                    return self.render(self.template_path, containername=self.containername,
                                       clustername=self.clustername, image=self.imagename,
                                       user=session['username'], description=self.description,
                                       masterip=masterip)
                else:
                    return self.render(self.error_path, message=result.get('message'))
        else:
            self.error()


class shareImageView(normalView):
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "image": self.image
        }
        result = dockletRequest.post("/image/share/", data, masterip)
        if (result):
            return redirect("/config/")
        else:
            self.error()


class unshareImageView(normalView):
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "image": self.image
        }
        result = dockletRequest.post("/image/unshare/", data, masterip)
        if (result):
            return redirect("/config/")
        else:
            self.error()


class copyImageView(normalView):
    error_path = "error.html"

    @classmethod
    def post(self):
        masterip = self.masterip
        data = {
            "image": self.image,
            "target": self.target
        }
        result = dockletRequest.post("/image/copy/", data, masterip)
        if result:
            if result.get('success') == 'true':
                return redirect("/config/")
            else:
                return self.render(self.error_path, message=result.get('message'))
        else:
            self.error()


class deleteImageView(normalView):
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "image": self.image
        }
        result = dockletRequest.post("/image/delete/", data, masterip)
        if (result):
            return redirect("/config/")
        else:
            self.error()


class addproxyView(normalView):
    @classmethod
    def post(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername,
            "ip": self.ip,
            "port": self.port
        }
        result = dockletRequest.post("/addproxy/", data, masterip)
        if (result):
            return redirect("/config/")
        else:
            self.error()


class deleteproxyView(normalView):
    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "clustername": self.clustername
        }
        result = dockletRequest.post("/deleteproxy/", data, masterip)
        if (result):
            return redirect("/config/")
        else:
            self.error()

    @classmethod
    def post(self):
        return self.get()


class configView(normalView):
    """Workspace config page: all images, all clusters, quota and defaults."""

    @classmethod
    def get(self):
        masterips = dockletRequest.post_to_all()
        allimages = dockletRequest.post_to_all('/image/list/')
        for master in allimages:
            allimages[master] = allimages[master].get('images')
        allclusters = dockletRequest.post_to_all("/cluster/list/")
        for master in allclusters:
            allclusters[master] = allclusters[master].get('clusters')
        allclusters_info = {}
        clusters_info = {}
        data = {}
        for master in allclusters:
            allclusters_info[master] = {}
            for cluster in allclusters[master]:
                data["clustername"] = cluster
                result = dockletRequest.post("/cluster/info/", data, master.split("@")[0]).get("message")
                allclusters_info[master][cluster] = result
        result = dockletRequest.post("/user/usageQuery/")
        quota = result.get("quota")
        usage = result.get("usage")
        default = result.get("default")
        restcpu = int(quota['cpu']) - int(usage['cpu'])
        restmemory = int(quota['memory']) - int(usage['memory'])
        restdisk = int(quota['disk']) - int(usage['disk'])
        if restcpu >= int(default['cpu']):
            defaultcpu = default['cpu']
        elif restcpu <= 0:
            defaultcpu = "0"
        else:
            defaultcpu = str(restcpu)
        if restmemory >= int(default['memory']):
            defaultmemory = default['memory']
        elif restmemory <= 0:
            defaultmemory = "0"
        else:
            defaultmemory = str(restmemory)
        if restdisk >= int(default['disk']):
            defaultdisk = default['disk']
        elif restdisk <= 0:
            defaultdisk = "0"
        else:
            defaultdisk = str(restdisk)
        defaultsetting = {
            'cpu': defaultcpu,
            'memory': defaultmemory,
            'disk': defaultdisk
        }
        return self.render("config.html", allimages=allimages,
                           allclusters=allclusters_info, mysession=dict(session),
                           quota=quota, usage=usage, defaultsetting=defaultsetting,
                           masterips=masterips)

    @classmethod
    def post(self):
        return self.get()


class addPortMappingView(normalView):
    template_path = "error.html"

    @classmethod
    def post(self):
        data = {"clustername": request.form["clustername"],
                "node_name": request.form["node_name"],
                "node_ip": request.form["node_ip"],
                "node_port": request.form["node_port"]}
        result = dockletRequest.post('/port_mapping/add/', data, self.masterip)
        if result.get("success") == "true":
            return redirect("/config/")
        return self.render(self.template_path, message=result.get("message"))

    @classmethod
    def get(self):
        return self.post()


class delPortMappingView(normalView):
    template_path = "error.html"

    @classmethod
    def post(self):
        data = {"clustername": self.clustername,
                "node_name": self.node_name,
                "node_port": self.node_port}
        result = dockletRequest.post('/port_mapping/delete/', data, self.masterip)
        if result.get("success") == "true":
            return redirect("/config/")
        return self.render(self.template_path, message=result.get("message"))

    @classmethod
    def get(self):
        return self.post()


# ================================================
# FILE: web/webViews/cookie_tool.py
# ================================================
#!/usr/bin/python3
import json, hashlib, base64, time
import sys
from webViews.log import logger

# Cookie format:
#   base64(json({"name": name, "login-time": time})) + "." + md5(json + securekey)
# part1 carries the payload; part2 is an integrity digest over payload+securekey.


def generate_cookie(name, securekey):
    """Build a signed cookie string "<b64(payload)>.<md5(payload+key)>"."""
    content = {'name': name, 'login-time': time.asctime()}
    text = json.dumps(content)
    part1 = base64.b64encode(text.encode('ascii'))
    part2 = hashlib.md5((text + securekey).encode('ascii')).hexdigest()
    # part1 is binary(ascii) and part2 is str(utf-8)
    cookie = str(part1, encoding='utf-8') + "." + part2
    return cookie


def parse_cookie(cookie, securekey):
    """Verify a cookie's digest and return the embedded name, or None."""
    logger.info(">> parse cookie : %s" % cookie)
    parts = cookie.split('.')
    part1 = parts[0]
    part2 = '' if len(parts) < 2 else parts[1]
    try:
        text = str(base64.b64decode(part1.encode('ascii')), encoding='utf-8')
    except:
        logger.info("decode cookie failed")
        return None
    logger.info("cookie content : %s" % text)
    thatpart2 = hashlib.md5((text + securekey).encode('ascii')).hexdigest()
    logger.info("hash from part1 : %s" % thatpart2)
    logger.info("hash from part2 : %s" % part2)
    if part2 == thatpart2:
        result = json.loads(text)['name']
    else:
        result = None
    logger.info("parse from cookie : %s" % result)
    return result


# ================================================
# FILE: web/webViews/dashboard.py
# ================================================
from flask import session, render_template
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest


class dashboardView(normalView):
    """Per-master summary of every cluster's status/id/proxy ip."""
    template_path = "dashboard.html"

    @classmethod
    def get(self):
        result = dockletRequest.post_to_all('/cluster/list/')
        desc = dockletRequest.getalldesc()
        allclusters = {}
        for master in result:
            clusters = result[master].get("clusters")
            full_clusters = []
            data = {}
            for cluster in clusters:
                data["clustername"] = cluster
                single_cluster = {}
                single_cluster['name'] = cluster
                message = dockletRequest.post("/cluster/info/", data, master.split("@")[0])
                if (message):
                    message = message.get("message")
                    single_cluster['status'] = message['status']
                    single_cluster['id'] = message['clusterid']
                    single_cluster['proxy_public_ip'] = message['proxy_public_ip']
                    full_clusters.append(single_cluster)
                else:
                    self.error()
            allclusters[master] = full_clusters
        return self.render(self.template_path, allclusters=allclusters, desc=desc)

    @classmethod
    def post(self):
        return self.get()


# ================================================
# FILE: web/webViews/dockletrequest.py
# ================================================
import requests
from flask import abort, session
from webViews.log import logger
import os, sys, inspect, traceback

this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder, "../..", "src")))
if src_folder not in sys.path:
    sys.path.insert(0, src_folder)
from utils import env

masterips = env.getenv('MASTER_IPS').split(",")
user_endpoint = "http://" + env.getenv('USER_IP') + ":" + str(env.getenv('USER_PORT'))
master_port = str(env.getenv('MASTER_PORT'))


def getip(masterip):
    # masters are stored as "ip@name"
    return masterip.split("@")[0]


def getname(masterip):
    return masterip.split("@")[1]


class dockletRequest():
    """Thin wrapper that routes REST calls to the user server or a master."""

    @classmethod
    def post(self, url='/', data={}, endpoint="http://0.0.0.0:9000"):
        # default data={} is safe: rebound via dict(data) before mutation
        data = dict(data)
        data['token'] = session['token']
        logger.info("Docklet Request: user = %s data = %s, url = %s" % (session['username'], data, url))
        reqtype = url.split("/")[1]
        # these request families go to the user server, everything else to a master
        userreq = {'login', 'external_login', 'register', 'user', 'beans',
                   'notification', 'settings', 'bug'}
        if ":" not in endpoint:
            endpoint = "http://" + endpoint + ":" + master_port
        if reqtype in userreq:
            result = requests.post(user_endpoint + url, data=data).json()
        else:
            result = requests.post(endpoint + url, data=data).json()
        if (result.get('success', None) == "false" and result.get('reason', None) == "Unauthorized Action"):
            abort(401)
        if (result.get('Unauthorized', None) == 'True'):
            session['401'] = 'Token Expired'
            abort(401)
        logstr = "Docklet Response: user = %s result = %s, url = %s" % (session['username'], result, url)
        # avoid dumping very large responses into the log
        if (sys.getsizeof(logstr) > 512):
            logstr = "Docklet Response: user = %s, url = %s" % (session['username'], url)
        logger.info(logstr)
        return result

    @classmethod
    def getdesc(self, mastername):
        # strip the surrounding quote characters stored in the env var
        return env.getenv(mastername + "_desc")[1:-1]

    @classmethod
    def getalldesc(self):
        masterips = self.post_to_all()
        res = {}
        for masterip in masterips:
            mastername = getname(masterip)
            res[mastername] = env.getenv(mastername + "_desc")
        return res

    @classmethod
    def post_to_all(self, url='/', data={}):
        # bare '/' means: probe liveness and return the reachable masters
        if (url == '/'):
            res = []
            for masterip in masterips:
                try:
                    requests.post("http://" + getip(masterip) + ":" + master_port + "/isalive/", data=data)
                except Exception as e:
                    logger.debug(e)
                    continue
                res.append(masterip)
            return res
        data = dict(data)
        data['token'] = session['token']
        logger.info("Docklet Request: user = %s data = %s, url = %s" % (session['username'], data, url))
        result = {}
        for masterip in masterips:
            try:
                res = requests.post("http://" + getip(masterip) + ":" + master_port + url, data=data).json()
            except Exception as e:
                logger.debug(traceback.format_exc())
                continue
            if 'success' in res and res['success'] == 'true':
                result[masterip] = res
                logger.info("get result from %s success" % getip(masterip))
            else:
                logger.error("get result from %s failed" % getip(masterip))
        return result

    @classmethod
    def unauthorizedpost(self, url='/', data=None):
        data = dict(data)
        data_log = {'user': data.get('user', 'external')}
        logger.info("Docklet Unauthorized Request: data = %s, url = %s" % (data_log, url))
        result = requests.post(user_endpoint + url, data=data).json()
        logger.info("Docklet Unauthorized Response: result = %s, url = %s" % (result, url))
        return result


# ================================================
# FILE: web/webViews/log.py
# ================================================
#!/usr/bin/env python
import logging
import logging.handlers
import argparse
import sys
import time  # this is only being used as part of the example
import os
import os, sys, inspect

this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
src_folder = os.path.realpath(os.path.abspath(os.path.join(this_folder, "../..", "src")))
if src_folder not in sys.path:
    sys.path.insert(0, src_folder)
from utils import env

# logger should only be imported after initlogging has been called
logger = None


def initlogging(name='docklet'):
    """Configure the module-level logger and redirect stdout/stderr to it."""
    global logger
    homepath = env.getenv('FS_PREFIX')
    LOG_FILENAME = homepath + '/local/log/' + name + '.log'
    LOG_LEVEL = env.getenv('WEB_LOG_LEVEL')
    # FIX: the original if/elif chain spelled logging.CRITICAL as
    # "logging.CRITIAL", raising AttributeError for WEB_LOG_LEVEL=CRITICAL.
    level_map = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    LOG_LEVEL = level_map.get(LOG_LEVEL, logging.DEBUG)
    logger = logging.getLogger(name)
    logger.setLevel(LOG_LEVEL)
    # New file at midnight; backupCount=0 keeps every rotated file
    handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="midnight",
                                                        backupCount=0, encoding='utf-8')
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(module)s[%(lineno)d] %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # Replace stdout/stderr so stray prints land in the log file
    sys.stdout = RedirectLogger(logger, logging.INFO)
    sys.stderr = RedirectLogger(logger, logging.ERROR)


class RedirectLogger(object):
    """File-like object that forwards writes to a logger at a fixed level."""

    def __init__(self, logger, level):
        """Needs a logger and a logger level."""
        self.logger = logger
        self.level = level

    def write(self, message):
        # Only log if there is a message (not just a new line)
        if message.rstrip() != "":
            self.logger.log(self.level, message.rstrip())

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()


# ================================================
# FILE: web/webViews/monitor.py
# ================================================
from flask import session
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest


class statusView(normalView):
    """Status page: quota info plus container status per master/cluster."""
    template_path = "monitor/status.html"

    @classmethod
    def get(self):
        data = {}
        allclusters = dockletRequest.post_to_all('/cluster/list/')
        for master in allclusters:
            allclusters[master] = allclusters[master].get('clusters')
        result = dockletRequest.post('/user/selfQuery/')
        quotas = result['data']['groupinfo']
        quotanames = quotas.keys()
        '''result = dockletRequest.post('/monitor/user/quotainfo/', data)
        quotainfo = result.get('quotainfo')
        quotainfo['cpu'] = int(int(quotainfo['cpu']))
        print(quotainfo)'''
        allcontainers = {}
        if (result):
            # NOTE(review): nesting below reconstructed from a flattened dump —
            # the batch-vnodes section is assumed to run once per master,
            # after that master's cluster loop; verify against upstream.
            for master in allclusters:
                allcontainers[master] = {}
                for cluster in allclusters[master]:
                    data["clustername"] = cluster
                    message = dockletRequest.post('/cluster/info/', data, master.split("@")[0])
                    if (message):
                        message = message.get('message')
                    else:
                        self.error()
                    allcontainers[master][cluster] = message
                message = dockletRequest.post('/batch/vnodes/list/', data, master.split("@")[0])
                message = message.get('data')
                containers = []
                for m in message:
                    container = {}
                    container['containername'] = m
                    container['ip'] = '--'
                    containers.append(container)
                tmp = {}
                tmp['containers'] = containers
                tmp['status'] = 'running'
                allcontainers[master]['Batch_Job'] = tmp
            return self.render(self.template_path, quotas=quotas, quotanames=quotanames,
                               allcontainers=allcontainers, user=session['username'])
        else:
            self.error()


class statusRealtimeView(normalView):
    template_path = "monitor/statusRealtime.html"
    node_name = ""

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "user": session['username'],
        }
        result = dockletRequest.post('/monitor/vnodes/%s/basic_info/' % (self.node_name), data, masterip)
        basic_info = result.get('monitor').get('basic_info')
        return self.render(self.template_path, node_name=self.node_name,
                           user=session['username'], container=basic_info, masterip=masterip)


class historyView(normalView):
    template_path = "monitor/history.html"

    @classmethod
    def get(self):
        data = {
            "user": session['username'],
        }
        allvnodes = {}
        result = dockletRequest.post_to_all('/monitor/user/createdvnodes/', data)
        for master in result:
            allvnodes[master] = result[master].get('createdvnodes')
        return self.render(self.template_path, user=session['username'], allvnodes=allvnodes)


class historyVNodeView(normalView):
    template_path = "monitor/historyVNode.html"
    vnode_name = ""

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "user": session['username'],
        }
        result = dockletRequest.post('/monitor/vnodes/%s/history/' % (self.vnode_name), data, masterip)
        history = result.get('monitor').get('history')
        return self.render(self.template_path, vnode_name=self.vnode_name,
                           user=session['username'], history=history)


class hostsRealtimeView(normalView):
    template_path = "monitor/hostsRealtime.html"
    com_ip = ""

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "user": session['username'],
        }
        result = dockletRequest.post('/monitor/hosts/%s/cpuconfig/' % (self.com_ip), data, masterip)
        proc = result.get('monitor').get('cpuconfig')
        result = dockletRequest.post('/monitor/hosts/%s/osinfo/' % (self.com_ip), data, masterip)
        osinfo = result.get('monitor').get('osinfo')
        result = dockletRequest.post('/monitor/hosts/%s/diskinfo/' % (self.com_ip), data, masterip)
        diskinfos = result.get('monitor').get('diskinfo')
        return self.render(self.template_path, com_ip=self.com_ip, user=session['username'],
                           processors=proc, OSinfo=osinfo, diskinfos=diskinfos, masterip=masterip)


class hostsConAllView(normalView):
    template_path = "monitor/hostsConAll.html"
    com_ip = ""

    @classmethod
    def get(self):
        masterip = self.masterip
        data = {
            "user": session['username'],
        }
        result = dockletRequest.post('/monitor/hosts/%s/containerslist/' % (self.com_ip), data, masterip)
        containers = result.get('monitor').get('containerslist')
        containerslist = []
        for container in containers:
            result = dockletRequest.post('/monitor/vnodes/%s/basic_info/' % (container), data, masterip)
            basic_info = result.get('monitor').get('basic_info')
            result = dockletRequest.post('/monitor/vnodes/%s/owner/' % (container), data, masterip)
            owner = result.get('monitor')
            basic_info['owner'] = owner
            containerslist.append(basic_info)
        return self.render(self.template_path, containerslist=containerslist,
                           com_ip=self.com_ip, user=session['username'], masterip=masterip)


class hostsView(normalView):
    template_path = "monitor/hosts.html"

    @classmethod
    def get(self):
        data = {
            "user": session['username'],
        }
        allresult = dockletRequest.post_to_all('/monitor/listphynodes/', data)
        allmachines = {}
        for master in allresult:
            allmachines[master] = []
            iplist = allresult[master].get('monitor').get('allnodes')
            for ip in iplist:
                containers = {}
                result = dockletRequest.post('/monitor/hosts/%s/containers/' % (ip), data, master.split("@")[0])
                containers = result.get('monitor').get('containers')
                result = dockletRequest.post('/monitor/hosts/%s/status/' % (ip), data, master.split("@")[0])
                status = result.get('monitor').get('status')
                allmachines[master].append({'ip': ip, 'containers': containers, 'status': status})
        return self.render(self.template_path, allmachines=allmachines, user=session['username'])


class monitorUserAllView(normalView):
    template_path = "monitor/monitorUserAll.html"

    @classmethod
    def get(self):
        data = {
            "user": session['username'],
        }
        result = dockletRequest.post('/monitor/listphynodes/', data)
        userslist = [{'name': 'root'}, {'name': 'libao'}]
        for user in userslist:
            result = dockletRequest.post('/monitor/user/%s/clustercnt/' % (user['name']), data)
            user['clustercnt'] = result.get('monitor').get('clustercnt')
        return self.render(self.template_path, userslist=userslist, user=session['username'])


# ================================================
# FILE: web/webViews/notification/notification.py
# ================================================
import json
from flask import session, render_template, redirect, request
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest


class NotificationView(normalView):
    template_path = 'notification.html'

    @classmethod
    def get(cls):
        result = dockletRequest.post('/notification/list/')
        groups = dockletRequest.post('/user/groupNameList/')['groups']
        notifications = result['data']
        return cls.render(cls.template_path, notifications=notifications, groups=groups)


class CreateNotificationView(normalView):
    template_path = 'create_notification.html'

    @classmethod
    def get(cls):
        groups = dockletRequest.post('/user/groupNameList/')['groups']
        return cls.render(cls.template_path, groups=groups)

    @classmethod
    def post(cls):
        dockletRequest.post('/notification/create/', request.form)
        return redirect('/notification/')


class QuerySelfNotificationsView(normalView):
    @classmethod
    def post(cls):
        result = dockletRequest.post('/notification/query_self/')
        return json.dumps(result)


class QueryNotificationView(normalView):
    template_path = 'notification_info.html'

    @classmethod
    def get_by_id(cls, notify_id):
        notifies = []
        if notify_id == 'all':
            notifies.extend(dockletRequest.post('/notification/query/all/')['data'])
        else:
            notifies.append(dockletRequest.post('/notification/query/', data={'notify_id': notify_id})['data'])
        return cls.render(cls.template_path, notifies=notifies)


class ModifyNotificationView(normalView):
    @classmethod
    def post(cls):
        dockletRequest.post('/notification/modify/', request.form)
        return redirect('/notification/')


class DeleteNotificationView(normalView):
    @classmethod
    def post(cls):
        dockletRequest.post('/notification/delete/', request.form)
        return redirect('/notification/')


# ================================================
# FILE: web/webViews/reportbug.py
# ================================================
from flask import session, render_template, request, redirect
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest


class reportBugView(normalView):
    template_path = "opsuccess.html"

    @classmethod
    def get(self):
        dockletRequest.post("/bug/report/", {'bugmessage': self.bugmessage})
        return self.render(self.template_path, message="Thank You!")

    @classmethod
    def post(self):
        return self.get()


# ================================================
# FILE: web/webViews/syslogs.py
# ================================================
from flask import session, render_template, redirect, request
from webViews.view import normalView
from webViews.dockletrequest import dockletRequest


class logsView(normalView):
    template_path = "logs.html"

    @classmethod
    def get(self):
        logs = dockletRequest.post('/logs/list/')['result']
        # sort alphabetically, then by length, so rotated logs group together
        logs.sort()
        logs.sort(key=len)
        return self.render(self.template_path, logs=logs)


# ================================================
# FILE: web/webViews/user/grouplist.py
# ================================================
from flask import redirect, request
from webViews.dockletrequest import dockletRequest
from webViews.view import normalView
import json


class grouplistView(normalView):
    template_path = "user/grouplist.html"
class groupdetailView(normalView): @classmethod def post(self): return json.dumps(dockletRequest.post('/user/groupList/')) class groupqueryView(normalView): @classmethod def post(self): return json.dumps(dockletRequest.post('/user/groupQuery/', request.form)) class groupmodifyView(normalView): @classmethod def post(self): result = json.dumps(dockletRequest.post('/user/groupModify/', request.form)) return redirect('/settings/') ================================================ FILE: web/webViews/user/userActivate.py ================================================ from flask import render_template, redirect, request from webViews.dockletrequest import dockletRequest from webViews.view import normalView class userActivateView(normalView): template_path = 'user/activate.html' @classmethod def get(self): userinfo = dockletRequest.post('/user/selfQuery/') userinfo = userinfo["data"] if (userinfo["description"] == ''): userinfo["description"] = "Describe why you want to use Docklet" return self.render(self.template_path, info = userinfo) @classmethod def post(self): dockletRequest.post('/register/', request.form) return redirect('/logout/') ================================================ FILE: web/webViews/user/userinfo.py ================================================ from flask import redirect, request from webViews.dockletrequest import dockletRequest from webViews.authenticate import login from webViews.view import normalView import json class userinfoView(normalView): template_path = "user/info.html" @classmethod def get(self): userinfo = dockletRequest.post('/user/selfQuery/') userinfo = userinfo["data"] return self.render(self.template_path, info = userinfo) @classmethod def post(self): result = json.dumps(dockletRequest.post('/user/selfModify/', request.form)) login.refreshInfo() return result ================================================ FILE: web/webViews/user/userlist.py ================================================ from flask import render_template, 
redirect, request from webViews.dockletrequest import dockletRequest from webViews.view import normalView import json class userlistView(normalView): template_path = "user_list.html" @classmethod def get(self): groups = dockletRequest.post('/user/groupNameList/')["groups"] applications = dockletRequest.post('/beans/admin/applymsgs/').get("applymsgs") return self.render(self.template_path, groups = groups, applications = applications) @classmethod def post(self): return json.dumps(dockletRequest.post('/user/data/')) class useraddView(normalView): @classmethod def post(self): dockletRequest.post('/user/add/', request.form) return redirect('/user/list/') class userdataView(normalView): @classmethod def get(self): return json.dumps(dockletRequest.post('/user/data/', request.form)) @classmethod def post(self): return json.dumps(dockletRequest.post('/user/data/', request.form)) class userqueryView(normalView): @classmethod def get(self): return json.dumps(dockletRequest.post('/user/query/', request.form)) @classmethod def post(self): return json.dumps(dockletRequest.post('/user/query/', request.form)) class usermodifyView(normalView): @classmethod def post(self): try: dockletRequest.post('/user/modify/', request.form) except: return self.render('user/mailservererror.html') return redirect('/user/list/') ================================================ FILE: web/webViews/view.py ================================================ from flask import render_template, request, abort, session from webViews.dockletrequest import dockletRequest import os, inspect this_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])) version_file = open(this_folder + '/../../VERSION') version = version_file.read() version_file.close() class normalView(): template_path = "dashboard.html" @classmethod def get(self): return self.render(self.template_path) @classmethod def post(self): return self.render(self.template_path) @classmethod def 
error(self): abort(404) @classmethod def as_view(self): if request.method == 'GET': return self.get() elif request.method == 'POST': return self.post() else: return self.error() @classmethod def render(self, *args, **kwargs): self.mysession = dict(session) kwargs['mysession'] = self.mysession kwargs['version'] = version result = dockletRequest.post("/user/selfQuery/",{}) kwargs['beans'] = result.get("data").get("beans") return render_template(*args, **kwargs)