
Merge remote-tracking branch 'upstream/dev-20190415' into dev-20190415

gongzijian 6 years ago
parent
commit
e441e124ec

+ 70 - 0
CONTRIBUTING.md

@@ -0,0 +1,70 @@
+EasyScheduler Code Submission Flow
+=====
+* First, fork the remote repository *https://github.com/analysys/EasyScheduler.git* into your own repository
+
+* The remote repository currently has three branches:
+    * master        regular delivery branch
+    * dev           daily development branch
+    * branch-1.0.0  release branch
+
+* Clone your repository to your local machine
+
+    `git clone https://github.com/**/EasyScheduler.git`
+
+* Add the remote repository address and name it upstream
+
+    `git remote add upstream https://github.com/analysys/EasyScheduler.git`
+
+* List the configured repositories:
+
+    `git remote -v`
+
> There are now two repositories: origin (your own repository) and upstream (the remote repository)
+
+* Fetch the remote repository's code (skip this if you are already up to date)
+
+    `git fetch upstream`
+
+* Update your local dev branch with the latest remote code
+
+```
+git checkout dev
+
+git pull upstream dev
+```
+
+* Merge the remote repository's code into your local repository
+
+```
+git checkout dev
+git merge --no-ff upstream/dev
+```
+
+* After modifying the code locally, commit it to your own repository:
+
+    `git commit -am 'test commit'`
+    `git push`
+
+* Submit the changes to the remote repository
+
+	* On the GitHub page, click New pull request.
+		<p align="center">
+	   <img src="http://geek.analysys.cn/static/upload/221/2019-04-02/90f3abbf-70ef-4334-b8d6-9014c9cf4c7f.png" width="60%" />
+	 </p>
+
+	* Select your modified branch and the branch to merge into, then click Create pull request.
+		<p align="center">
+	   <img src="http://geek.analysys.cn/static/upload/221/2019-04-02/fe7eecfe-2720-4736-951b-b3387cf1ae41.png" width="60%" />
+	 </p>
+	* A maintainer will then **Merge** to complete the pull request (a consolidated command sketch follows below)
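+
+A consolidated sketch of the full flow above, assuming your fork is already cloned and `upstream` is configured as shown:
+
+```
+git fetch upstream
+git checkout dev
+git merge --no-ff upstream/dev
+# ...edit files, then:
+git commit -am 'your change'
+git push origin dev
+# finally, open a pull request on GitHub
+```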

+ 41 - 0
Dockerfile

@@ -0,0 +1,41 @@
+# Maintained by jimmy
+# Email: zhengge2012@gmail.com
+FROM anapsix/alpine-java:8_jdk
+WORKDIR /tmp
+RUN wget http://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz
+RUN tar -zxvf apache-maven-3.6.1-bin.tar.gz && rm apache-maven-3.6.1-bin.tar.gz 
+RUN mv apache-maven-3.6.1 /usr/lib/mvn
+RUN chown -R root:root /usr/lib/mvn
+RUN ln -s /usr/lib/mvn/bin/mvn /usr/bin/mvn
+RUN wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz
+RUN tar -zxvf zookeeper-3.4.6.tar.gz
+RUN mv zookeeper-3.4.6 /opt/zookeeper
+RUN rm -rf zookeeper-3.4.6.tar.gz
+RUN echo "export ZOOKEEPER_HOME=/opt/zookeeper" >>/etc/profile
+RUN echo "export PATH=$PATH:$ZOOKEEPER_HOME/bin"  >>/etc/profile
+ADD conf/zoo.cfg /opt/zookeeper/conf/zoo.cfg
+#RUN source /etc/profile
+#RUN zkServer.sh start
+RUN apk add --no-cache git npm nginx mariadb mariadb-client mariadb-server-utils pwgen
+WORKDIR /opt
+RUN git clone https://github.com/analysys/EasyScheduler.git
+WORKDIR /opt/EasyScheduler
+RUN mvn -U clean package assembly:assembly -Dmaven.test.skip=true
+RUN mv /opt/EasyScheduler/target/escheduler-1.0.0-SNAPSHOT /opt/easyscheduler
+WORKDIR /opt/EasyScheduler/escheduler-ui
+RUN npm install
+RUN npm audit fix
+RUN npm run build
+RUN mkdir -p /opt/escheduler/front/server
+RUN cp -rfv dist/* /opt/escheduler/front/server
+WORKDIR /
+RUN rm -rf /opt/EasyScheduler
+#configure mysql server https://github.com/yobasystems/alpine-mariadb/tree/master/alpine-mariadb-amd64
+ADD conf/run.sh /scripts/run.sh
+RUN mkdir /docker-entrypoint-initdb.d && \
+    mkdir /scripts/pre-exec.d && \
+    mkdir /scripts/pre-init.d && \
+    chmod -R 755 /scripts
+RUN rm -rf /var/cache/apk/*
+EXPOSE 8888
+ENTRYPOINT ["/scripts/run.sh"]
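+
+# A minimal build-and-run sketch for this image; the image name and host port
+# mapping are illustrative, not part of this commit:
+#   docker build -t easyscheduler .
+#   docker run -d -p 8888:8888 easyscheduler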

+ 13 - 1
README.md

@@ -45,12 +45,24 @@ Easy Scheduler
 
 For more documentation, please see the <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">EasyScheduler online documentation (Chinese)</a>
 
+
+### Recent Development Plan
+
+EasyScheduler's work plan: <a href="https://github.com/analysys/EasyScheduler/projects/1" target="_blank">Development Plan</a>. Cards under "In Develop" are the features for version 1.0.2, and the TODO column holds pending items (including feature ideas)
+
+### Contributing
+
+Contributions are very welcome. For the code submission flow, please see:
+https://github.com/analysys/EasyScheduler/blob/master/CONTRIBUTING.md
+
+
 ### Thanks
 
-- Easy Scheduler uses many excellent open-source projects, such as Google's guava, guice, and grpc, netty, Ali's bonecp, quartz, and many Apache open-source projects;
+Easy Scheduler uses many excellent open-source projects, such as Google's guava, guice, and grpc, netty, Ali's bonecp, quartz, and many Apache open-source projects;
 it is only by standing on the shoulders of these open-source projects that Easy Scheduler could come into being. We are deeply grateful to all the open-source software we use! We hope to be not only beneficiaries of open source
 but also contributors, so we decided to contribute EasyScheduler and committed to maintaining it long-term. We also hope that partners who share the same passion and belief in open source will join in and give back to the community!
 
+
 ### Help
 The fastest way to get a response from our developers is to submit issues, or add our WeChat: 510570367
  

+ 31 - 0
conf/escheduler.conf

@@ -0,0 +1,31 @@
+server {
+    listen       8888; # access port
+    server_name  localhost;
+    #charset koi8-r;
+    #access_log  /var/log/nginx/host.access.log  main;
+    location / {
+        root   /opt/escheduler/front/server; # static file directory
+        index  index.html index.htm;
+    }
+    location /escheduler {
+        proxy_pass http://127.0.0.1:12345; # API address
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header x_real_ip $remote_addr;
+        proxy_set_header remote_addr $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_http_version 1.1;
+        proxy_connect_timeout 4s;
+        proxy_read_timeout 30s;
+        proxy_send_timeout 12s;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection "upgrade";
+    }
+    #error_page  404              /404.html;
+    # redirect server error pages to the static page /50x.html
+    #
+    error_page   500 502 503 504  /50x.html;
+    location = /50x.html {
+        root   /usr/share/nginx/html;
+    }
+}
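+
+# A sketch for validating and applying this server block, assuming it is
+# installed under /etc/nginx/conf.d/ (illustrative, not part of this commit):
+#   nginx -t          # validate configuration syntax
+#   nginx -s reload   # apply it without dropping connections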

+ 310 - 0
conf/install.sh

@@ -0,0 +1,310 @@
+#!/bin/bash
+
+workDir="/opt/easyscheduler"
+workDir=`cd ${workDir};pwd`
+
+#To be compatible with MacOS and Linux
+txt=""
+if [[ "$OSTYPE" == "darwin"* ]]; then
+    # Mac OSX
+    txt="''"
+elif [[ "$OSTYPE" == "linux-gnu" ]]; then
+    # linux
+    txt=""
+elif [[ "$OSTYPE" == "cygwin" ]]; then
+    # POSIX compatibility layer and Linux environment emulation for Windows
+    echo "Easy Scheduler does not support the Windows operating system"
+    exit 1
+elif [[ "$OSTYPE" == "msys" ]]; then
+    # Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
+    echo "Easy Scheduler does not support the Windows operating system"
+    exit 1
+elif [[ "$OSTYPE" == "win32" ]]; then
+    echo "Easy Scheduler does not support the Windows operating system"
+    exit 1
+elif [[ "$OSTYPE" == "freebsd"* ]]; then
+    # ...
+    txt=""
+else
+    # Unknown.
+    echo "Operating system unknown, please tell us(submit issue) for better service"
+    exit 1
+fi
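+
+# Note: ${txt} exists because BSD sed on macOS requires an explicit backup
+# suffix argument after -i (e.g. `sed -i '' ...`), while GNU sed on Linux
+# accepts a bare `sed -i`; the `sed -i ${txt}` calls below rely on this.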
+
+source ${workDir}/conf/config/run_config.conf
+source ${workDir}/conf/config/install_config.conf
+
+# MySQL configuration
+# MySQL host and port
+mysqlHost="127.0.0.1:3306"
+
+# MySQL database name
+mysqlDb="easyscheduler"
+
+# MySQL username
+mysqlUserName="easyscheduler"
+
+# MySQL password
+mysqlPassword="easyschedulereasyscheduler"
+
+# conf/config/install_config.conf settings
+# installation path; must not be the same as the current path (pwd)
+installPath="/opt/easyscheduler"
+
+# deployment user
+deployUser="escheduler"
+
+# ZooKeeper cluster
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# hosts to install on
+ips="ark0,ark1,ark2,ark3,ark4"
+
+# conf/config/run_config.conf settings
+# machines that run the Master
+masters="ark0,ark1"
+
+# machines that run the Worker
+workers="ark2,ark3,ark4"
+
+# machine that runs the Alert server
+alertServer="ark3"
+
+# machines that run the API server
+apiServers="ark1"
+
+# alert settings
+# mail protocol
+mailProtocol="SMTP"
+
+# mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# mail server port
+mailServerPort="25"
+
+# sender
+mailSender="xxxxxxxxxx"
+
+# sender password
+mailPassword="xxxxxxxxxx"
+
+# Excel download path
+xlsFilePath="/tmp/xls"
+
+
+# Hadoop configuration
+# whether to enable HDFS: if enabled, set to true and configure the Hadoop parameters below;
+# if disabled, set to false, in which case the settings below need no changes
+hdfsStartupSate="false"
+
+# namenode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed in the conf directory
+namenodeFs="hdfs://mycluster:8020"
+
+# resourcemanager HA configuration; leave empty for a single resourcemanager
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+
+# for a single resourcemanager, configure just one hostname; for resourcemanager HA, the default is fine
+singleYarnIp="ark1"
+
+# HDFS root path; its owner must be the deployment user
+hdfsPath="/escheduler"
+
+# common settings
+# program path
+programPath="/tmp/escheduler"
+
+# download path
+downloadPath="/tmp/escheduler/download"
+
+# task execution path
+execPath="/tmp/escheduler/exec"
+
+# SHELL environment variable file path
+shellEnvPath="$installPath/conf/env/.escheduler_env.sh"
+
+# Python environment variable file path
+pythonEnvPath="$installPath/conf/env/escheduler_env.py"
+
+# resource file suffixes
+resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
+
+# development state; if true, the wrapped SHELL scripts can be inspected under execPath; if false, they are deleted as soon as execution finishes
+devState="true"
+
+# ZooKeeper settings
+# ZooKeeper root directory
+zkRoot="/escheduler"
+
+# ZooKeeper directory used to record dead servers
+zkDeadServers="/escheduler/dead-servers"
+
+# masters directory
+zkMasters="/escheduler/masters"
+
+# workers directory
+zkWorkers="/escheduler/workers"
+
+# ZooKeeper distributed lock for masters
+mastersLock="/escheduler/lock/masters"
+
+# ZooKeeper distributed lock for workers
+workersLock="/escheduler/lock/workers"
+
+# ZooKeeper distributed lock for master failover
+mastersFailover="/escheduler/lock/failover/masters"
+
+# ZooKeeper distributed lock for worker failover
+workersFailover="/escheduler/lock/failover/workers"
+
+# ZooKeeper session timeout
+zkSessionTimeout="300"
+
+# ZooKeeper connection timeout
+zkConnectionTimeout="300"
+
+# ZooKeeper retry interval
+zkRetrySleep="100"
+
+# ZooKeeper maximum retry count
+zkRetryMaxtime="5"
+
+
+# master settings
+# maximum number of master execution threads, i.e. the maximum parallelism of process instances
+masterExecThreads="100"
+
+# maximum number of master task execution threads, i.e. the maximum parallelism within each process instance
+masterExecTaskNum="20"
+
+# master heartbeat interval
+masterHeartbeatInterval="10"
+
+# master task commit retry count
+masterTaskCommitRetryTimes="5"
+
+# master task commit retry interval
+masterTaskCommitInterval="100"
+
+# maximum average CPU load on the master, used to decide whether the master still has capacity to execute
+masterMaxCupLoadAvg="10"
+
+# reserved memory on the master, used to decide whether the master still has capacity to execute
+masterReservedMemory="1"
+
+
+# worker settings
+# worker execution threads
+workerExecThreads="100"
+
+# worker heartbeat interval
+workerHeartbeatInterval="10"
+
+# number of tasks a worker fetches at a time
+workerFetchTaskNum="10"
+
+# maximum average CPU load on the worker, used to decide whether the worker still has capacity to execute
+workerMaxCupLoadAvg="10"
+
+# reserved memory on the worker, used to decide whether the worker still has capacity to execute
+workerReservedMemory="1"
+
+# API settings
+# API server port
+apiServerPort="12345"
+
+# API session timeout
+apiServerSessionTimeout="7200"
+
+# API context path
+apiServerContextPath="/escheduler/"
+
+# Spring maximum file size
+springMaxFileSize="1024MB"
+
+# Spring maximum request size
+springMaxRequestSize="1024MB"
+
+# API maximum HTTP POST size
+apiMaxHttpPostSize="5000000"
+
+# 1. replace configuration files
+echo "1. replacing configuration files"
+sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
+sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties
+sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties
+
+sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/quartz.properties
+sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${mysqlUserName}#g" conf/quartz.properties
+sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${mysqlPassword}#g" conf/quartz.properties
+
+
+sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${namenodeFs}#g" conf/common/hadoop/hadoop.properties
+sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common/hadoop/hadoop.properties
+sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common/hadoop/hadoop.properties
+
+sed -i ${txt} "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common/common.properties
+sed -i ${txt} "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common/common.properties
+sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common/common.properties
+sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common/common.properties
+sed -i ${txt} "s#hdfs.startup.state.*#hdfs.startup.state=${hdfsStartupSate}#g" conf/common/common.properties
+sed -i ${txt} "s#escheduler.env.path.*#escheduler.env.path=${shellEnvPath}#g" conf/common/common.properties
+sed -i ${txt} "s#escheduler.env.py.*#escheduler.env.py=${pythonEnvPath}#g" conf/common/common.properties
+sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common/common.properties
+sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common/common.properties
+
+sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.root.*#zookeeper.escheduler.root=${zkRoot}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.dead.servers.*#zookeeper.escheduler.dead.servers=${zkDeadServers}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.masters.*#zookeeper.escheduler.masters=${zkMasters}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.workers.*#zookeeper.escheduler.workers=${zkWorkers}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.lock.masters.*#zookeeper.escheduler.lock.masters=${mastersLock}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.lock.workers.*#zookeeper.escheduler.lock.workers=${workersLock}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.lock.failover.masters.*#zookeeper.escheduler.lock.failover.masters=${mastersFailover}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.escheduler.lock.failover.workers.*#zookeeper.escheduler.lock.failover.workers=${workersFailover}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.retry.sleep.*#zookeeper.retry.sleep=${zkRetrySleep}#g" conf/zookeeper.properties
+sed -i ${txt} "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/zookeeper.properties
+
+sed -i ${txt} "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/master.properties
+sed -i ${txt} "s#master.exec.task.number.*#master.exec.task.number=${masterExecTaskNum}#g" conf/master.properties
+sed -i ${txt} "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/master.properties
+sed -i ${txt} "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/master.properties
+sed -i ${txt} "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/master.properties
+sed -i ${txt} "s#master.max.cpuload.avg.*#master.max.cpuload.avg=${masterMaxCupLoadAvg}#g" conf/master.properties
+sed -i ${txt} "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/master.properties
+
+
+sed -i ${txt} "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/worker.properties
+sed -i ${txt} "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/worker.properties
+sed -i ${txt} "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/worker.properties
+sed -i ${txt} "s#worker.max.cpuload.avg.*#worker.max.cpuload.avg=${workerMaxCupLoadAvg}#g" conf/worker.properties
+sed -i ${txt} "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/worker.properties
+
+
+sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application.properties
+sed -i ${txt} "s#server.session.timeout.*#server.session.timeout=${apiServerSessionTimeout}#g" conf/application.properties
+sed -i ${txt} "s#server.context-path.*#server.context-path=${apiServerContextPath}#g" conf/application.properties
+sed -i ${txt} "s#spring.http.multipart.max-file-size.*#spring.http.multipart.max-file-size=${springMaxFileSize}#g" conf/application.properties
+sed -i ${txt} "s#spring.http.multipart.max-request-size.*#spring.http.multipart.max-request-size=${springMaxRequestSize}#g" conf/application.properties
+sed -i ${txt} "s#server.max-http-post-size.*#server.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application.properties
+
+
+sed -i ${txt} "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
+sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
+sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
+sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
+sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
+sed -i ${txt} "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties
+
+
+sed -i ${txt} "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
+sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
+sed -i ${txt} "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
+
+
+sed -i ${txt} "s#masters.*#masters=${masters}#g" conf/config/run_config.conf
+sed -i ${txt} "s#workers.*#workers=${workers}#g" conf/config/run_config.conf
+sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf
+sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf

+ 105 - 0
conf/run.sh

@@ -0,0 +1,105 @@
+#!/bin/sh
+
+# execute any pre-init scripts
+for i in /scripts/pre-init.d/*sh
+do
+	if [ -e "${i}" ]; then
+		echo "[i] pre-init.d - processing $i"
+		. "${i}"
+	fi
+done
+
+if [ -d "/run/mysqld" ]; then
+	echo "[i] mysqld already present, skipping creation"
+	chown -R mysql:mysql /run/mysqld
+else
+	echo "[i] mysqld not found, creating...."
+	mkdir -p /run/mysqld
+	chown -R mysql:mysql /run/mysqld
+fi
+
+if [ -d /var/lib/mysql/mysql ]; then
+	echo "[i] MySQL directory already present, skipping creation"
+	chown -R mysql:mysql /var/lib/mysql
+else
+	echo "[i] MySQL data directory not found, creating initial DBs"
+
+	chown -R mysql:mysql /var/lib/mysql
+
+	mysql_install_db --user=mysql --ldata=/var/lib/mysql > /dev/null
+
+	if [ "$MYSQL_ROOT_PASSWORD" = "" ]; then
+		MYSQL_ROOT_PASSWORD=`pwgen 16 1`
+		echo "[i] MySQL root Password: $MYSQL_ROOT_PASSWORD"
+	fi
+
+	MYSQL_DATABASE="easyscheduler"
+	MYSQL_USER="easyscheduler"
+	MYSQL_PASSWORD="easyschedulereasyscheduler"
+
+	tfile=`mktemp`
+	if [ ! -f "$tfile" ]; then
+	    exit 1
+	fi
+
+	cat << EOF > $tfile
+USE mysql;
+FLUSH PRIVILEGES ;
+GRANT ALL ON *.* TO 'root'@'%' identified by '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION ;
+GRANT ALL ON *.* TO 'root'@'localhost' identified by '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION ;
+SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;
+DROP DATABASE IF EXISTS test ;
+FLUSH PRIVILEGES ;
+EOF
+
+	if [ "$MYSQL_DATABASE" != "" ]; then
+	    echo "[i] Creating database: $MYSQL_DATABASE"
+	    echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` CHARACTER SET utf8 COLLATE utf8_general_ci;" >> $tfile
+
+	    if [ "$MYSQL_USER" != "" ]; then
+		echo "[i] Creating user: $MYSQL_USER with password $MYSQL_PASSWORD"
+		echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* to '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD';" >> $tfile
+	    fi
+	fi
+
+	/usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0 < $tfile
+	rm -f $tfile
+
+	for f in /docker-entrypoint-initdb.d/*; do
+		case "$f" in
+			*.sql)    echo "$0: running $f"; /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0 < "$f"; echo ;;
+			*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0; echo ;;
+			*)        echo "$0: ignoring $f" ;;
+		esac
+		echo
+	done
+
+	echo
+	echo 'MySQL init process done. Ready for start up.'
+	echo
+
+	echo "exec /usr/bin/mysqld --user=mysql --console --skip-name-resolve --skip-networking=0" "$@"
+fi
+
+# execute any pre-exec scripts
+for i in /scripts/pre-exec.d/*sh
+do
+	if [ -e "${i}" ]; then
+		echo "[i] pre-exec.d - processing $i"
+		. ${i}
+	fi
+done
+
+mysql -ueasyscheduler -peasyschedulereasyscheduler --one-database easyscheduler -h127.0.0.1 < /opt/easyscheduler/sql/escheduler.sql
+mysql -ueasyscheduler -peasyschedulereasyscheduler --one-database easyscheduler -h127.0.0.1 < /opt/easyscheduler/sql/quartz.sql
+source /etc/profile
+zkServer.sh start
+cd /opt/easyscheduler
+rm -rf /etc/nginx/conf.d/default.conf
+sh ./bin/escheduler-daemon.sh start master-server
+sh ./bin/escheduler-daemon.sh start worker-server
+sh ./bin/escheduler-daemon.sh start api-server
+sh ./bin/escheduler-daemon.sh start logger-server
+sh ./bin/escheduler-daemon.sh start alert-server
+nginx -c /etc/nginx/nginx.conf
+exec /usr/bin/mysqld --user=mysql --console --skip-name-resolve --skip-networking=0 "$@"
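+
+# An illustrative in-container smoke test, not part of this commit; the ports
+# are the UI (8888, conf/escheduler.conf) and the API server (12345, install.sh):
+#   netstat -tln | grep -E ':(8888|12345) '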

+ 30 - 0
conf/zoo.cfg

@@ -0,0 +1,30 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial 
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between 
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored;
+# do not use /tmp for storage (dataDir is set to a persistent
+# location at the bottom of this file)
+# the port at which the clients will connect
+clientPort=2181
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the 
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
+dataDir=/opt/zookeeper/data
+dataLogDir=/opt/zookeeper/logs
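+
+# An illustrative health check after starting ZooKeeper with this config; the
+# `ruok` four-letter command is built into ZooKeeper 3.4.x:
+#   zkServer.sh start
+#   echo ruok | nc localhost 2181   # replies "imok" when the server is healthy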

File diff suppressed because it is too large
+ 0 - 2390
docs/zh_CN/Easyscheduler文档.pdf


+ 38 - 0
docs/zh_CN/升级文档.md

@@ -0,0 +1,38 @@
+
+# EasyScheduler Upgrade Guide
+
+## 1. Back up the previous version's files and database.
+
+## 2. Stop all escheduler services.
+
+ `sh ./script/stop_all.sh`
+
+## 3. Download the new version's installation packages
+
+- [Download from Gitee](https://gitee.com/easyscheduler/EasyScheduler/attach_files): download the latest front-end and back-end packages (back end: escheduler-backend, front end: escheduler-ui)
+- All of the upgrade steps below must be performed in the new version's directory
+
+## 4. Database upgrade
+- Modify the following properties in conf/dao/data_source.properties
+
+```
+    spring.datasource.url
+    spring.datasource.username
+    spring.datasource.password
+```
+
+- Run the database upgrade script
+
+`sh ./script/upgrade_escheduler.sh`
+
+## 5. Back-end service upgrade
+
+- Update the configuration in install.sh, then run the upgrade script
+
+  `sh install.sh`
+
+## 6. Front-end service upgrade
+- Overwrite the previous version's dist directory
+- Restart the nginx service
+
+    `systemctl restart nginx`
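+
+A condensed sketch of the whole sequence above (run from the new version's directory; the numbers refer to the sections above):
+
+```
+# 1. back up the previous version's files and database first
+sh ./script/stop_all.sh            # 2. stop all services
+# 4. edit conf/dao/data_source.properties (url / username / password), then:
+sh ./script/upgrade_escheduler.sh  # 4. upgrade the database
+sh install.sh                      # 5. upgrade the back end
+systemctl restart nginx            # 6. reload the front end
+```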

+ 7 - 0
docs/zh_CN/后端部署文档.md

@@ -67,7 +67,14 @@ escheduler  ALL=(ALL)       NOPASSWD: NOPASSWD: ALL
     ```
 
 * Create tables and import basic data for version 1.0.2
+    Modify the following properties in conf/dao/data_source.properties
 
+    ```
+        spring.datasource.url
+        spring.datasource.username
+        spring.datasource.password
+    ```
+    Run the table-creation and basic-data import script
     ```
     sh ./script/create_escheduler.sh
     ```

+ 1 - 1
escheduler-api/src/main/java/cn/escheduler/api/enums/Status.java

@@ -31,7 +31,7 @@ public enum Status {
     HDFS_OPERATION_ERROR(10006, "hdfs operation error"),
     UPDATE_FAILED(10007, "updateProcessInstance failed"),
     TASK_INSTANCE_HOST_NOT_FOUND(10008, "task instance does not set host"),
-    TENANT_NAME_EXIST(10009, "tenant name already exists"),
+    TENANT_NAME_EXIST(10009, "tenant code already exists"),
     USER_NOT_EXIST(10010, "user {0} not exists"),
     ALERT_GROUP_NOT_EXIST(10011, "alarm group not found"),
     ALERT_GROUP_EXIST(10012, "alarm group already exists"),

+ 5 - 1
escheduler-api/src/main/java/cn/escheduler/api/service/ProjectService.java

@@ -242,7 +242,11 @@ public class ProjectService extends BaseService{
         if (checkResult != null) {
             return checkResult;
         }
-
+        Project tempProject = projectMapper.queryByName(projectName);
+        if (tempProject != null && tempProject.getId() != projectId) {
+            putMsg(result, Status.PROJECT_ALREADY_EXISTS, projectName);
+            return result;
+        }
         project.setName(projectName);
         project.setDesc(desc);
         project.setUpdateTime(new Date());

+ 5 - 0
escheduler-api/src/main/java/cn/escheduler/api/service/UsersService.java

@@ -216,6 +216,11 @@ public class UsersService extends BaseService {
         Date now = new Date();
 
         if (StringUtils.isNotEmpty(userName)) {
+            User tempUser = userMapper.queryByUserName(userName);
+            if (tempUser != null && tempUser.getId() != userId) {
+                putMsg(result, Status.USER_NAME_EXIST);
+                return result;
+            }
             user.setUserName(userName);
         }
 

+ 1 - 1
script/del_zk_node.py

@@ -8,7 +8,7 @@ class ZkClient:
         self.zk.start()
     def del_node(self):
         self.zk.delete(sys.argv[2], recursive=True)
-	print('deleted success')
+        print('deleted success')
     def __del__(self):
         self.zk.stop()
 if __name__ == '__main__':