#!/bin/bash
# EasyScheduler deployment script: renders configuration templates from the
# variables below, prepares working directories on every host, then restarts
# the whole cluster.
#
# NOTE: bash (not plain sh) is required — the script uses arrays and
# ${var//,/ } parameter expansion further down.

# Resolve the absolute directory this script lives in, so it can be
# invoked from any working directory.
workDir=$(dirname "$0")
workDir=$(cd "$workDir" && pwd)

# Load cluster topology (masters/workers/...) and install settings.
source "${workDir}/conf/config/run_config.conf"
source "${workDir}/conf/config/install_config.conf"
# ---------------- mysql configuration ----------------
# mysql host and port
mysqlHost="192.168.xx.xx:3306"
# mysql database name
mysqlDb="escheduler"
# mysql user name
mysqlUserName="xx"
# mysql password
mysqlPassword="xx"

# ---------------- hadoop configuration ----------------
# namenode address; HA is supported — put core-site.xml and hdfs-site.xml
# into the conf directory when using HA
namenodeFs="hdfs://mycluster:8020"
# resourcemanager HA addresses; leave empty for a single resourcemanager
yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# for a single resourcemanager set its hostname here; with resourcemanager HA
# the default value is fine
singleYarnIp="ark1"

# ---------------- common configuration ----------------
# program base path
programPath="/tmp/escheduler"
# download path
downloadPath="/tmp/escheduler/download"
# task execution path
execPath="/tmp/escheduler/exec"
# hdfs root path
hdfsPath="/escheduler"
# whether hdfs is enabled: "true" to enable, "false" to disable
# (variable name keeps the original misspelling "Sate" — it is referenced
# by the config-rendering section below)
hdfsStartupSate="true"
# SHELL environment file path
shellEnvPath="/opt/.escheduler_env.sh"
# Python environment file path
pythonEnvPath="/opt/escheduler_env.py"
# viewable resource-file suffixes
resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
# development state: if "true", wrapped SHELL scripts are kept under execPath
# for inspection; if "false" they are deleted right after execution
devState="true"

# ---------------- zookeeper configuration ----------------
# zk cluster quorum
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
# zk root node
zkRoot="/escheduler"
# zk node used to record dead servers
zkDeadServers="/escheduler/dead-servers"
# masters node
zkMasters="/escheduler/masters"
# workers node
zkWorkers="/escheduler/workers"
# zk master distributed lock
mastersLock="/escheduler/lock/masters"
# zk worker distributed lock
workersLock="/escheduler/lock/workers"
# zk master failover distributed lock
mastersFailover="/escheduler/lock/failover/masters"
# zk worker failover distributed lock
# (fixed: previously pointed at .../failover/masters, which made the worker
# failover lock collide with the master failover lock)
workersFailover="/escheduler/lock/failover/workers"
# zk session timeout
zkSessionTimeout="300"
# zk connection timeout
zkConnectionTimeout="300"
# zk retry interval
zkRetrySleep="100"
# zk max retry count
zkRetryMaxtime="5"

# ---------------- master configuration ----------------
# max master execution threads, i.e. max parallelism of process instances
masterExecThreads="100"
# max master task execution threads, i.e. max parallelism per process instance
masterExecTaskNum="20"
# master heartbeat interval
masterHeartbeatInterval="10"
# master task commit retry count
masterTaskCommitRetryTimes="5"
# master task commit retry interval
masterTaskCommitInterval="100"
# max master cpu load average, used to decide whether the master can still
# accept work (variable name keeps the original misspelling "Cup")
masterMaxCupLoadAvg="10"
# master reserved memory, used to decide whether the master can still accept work
masterReservedMemory="1"

# ---------------- worker configuration ----------------
# worker execution threads
workerExecThreads="100"
# worker heartbeat interval
workerHeartbeatInterval="10"
# number of tasks a worker fetches at once
workerFetchTaskNum="10"
# max worker cpu load average, used to decide whether the worker can still
# accept work (variable name keeps the original misspelling "Cup")
workerMaxCupLoadAvg="10"
# worker reserved memory, used to decide whether the worker can still accept work
workerReservedMemory="1"

# ---------------- api configuration ----------------
# api server port
apiServerPort="12345"
# api session timeout
apiServerSessionTimeout="7200"
# api context path
apiServerContextPath="/escheduler/"
# spring max file size
springMaxFileSize="1024MB"
# spring max request size
springMaxRequestSize="1024MB"
# api max http post size
apiMaxHttpPostSize="5000000"

# ---------------- alert configuration ----------------
# mail protocol
mailProtocol="SMTP"
# mail server host
mailServerHost="smtp.exmail.qq.com"
# mail server port
mailServerPort="25"
# mail sender
mailSender="xxxxxxxxxx"
# mail sender password
mailPassword="xxxxxxxxxx"
# Excel download path
xlsFilePath="/opt/xls"

# ---------------- conf/config/install_config.conf ----------------
# install path
installPath="/data1_1T/escheduler"
# deploy user
deployUser="escheduler"
# hosts to install on
ips="ark0,ark1,ark2,ark3,ark4"

# ---------------- conf/config/run_config.conf ----------------
# hosts running a Master
masters="ark0,ark1"
# hosts running a Worker
workers="ark2,ark3,ark4"
# host running the Alert server
alertServer="ark3"
# hosts running the Api server
apiServers="ark1"
# 1. Render the configuration templates in place.
echo "1,替换文件"

# Portable in-place sed: BSD/macOS sed requires `-i ''` while GNU sed must be
# given `-i` with no separate argument (it would treat '' as a filename and
# silently skip the edit). Detect the platform once and wrap the difference.
if [ "$(uname)" = "Darwin" ]; then
  sedi() { sed -i '' "$@"; }
else
  sedi() { sed -i "$@"; }
fi

# datasource
sedi "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
sedi "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties
sedi "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties

# quartz
sedi "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/quartz.properties
sedi "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${mysqlUserName}#g" conf/quartz.properties
sedi "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${mysqlPassword}#g" conf/quartz.properties

# hadoop
sedi "s#fs.defaultFS.*#fs.defaultFS = ${namenodeFs}#g" conf/common/hadoop/hadoop.properties
sedi "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common/hadoop/hadoop.properties
sedi "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common/hadoop/hadoop.properties

# common
sedi "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common/common.properties
sedi "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common/common.properties
sedi "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common/common.properties
sedi "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common/common.properties
sedi "s#hdfs.startup.state.*#hdfs.startup.state=${hdfsStartupSate}#g" conf/common/common.properties
sedi "s#escheduler.env.path.*#escheduler.env.path=${shellEnvPath}#g" conf/common/common.properties
sedi "s#escheduler.env.py.*#escheduler.env.py=${pythonEnvPath}#g" conf/common/common.properties
sedi "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common/common.properties
sedi "s#development.state.*#development.state=${devState}#g" conf/common/common.properties

# zookeeper
sedi "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.root.*#zookeeper.escheduler.root=${zkRoot}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.dead.servers.*#zookeeper.escheduler.dead.servers=${zkDeadServers}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.masters.*#zookeeper.escheduler.masters=${zkMasters}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.workers.*#zookeeper.escheduler.workers=${zkWorkers}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.lock.masters.*#zookeeper.escheduler.lock.masters=${mastersLock}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.lock.workers.*#zookeeper.escheduler.lock.workers=${workersLock}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.lock.failover.masters.*#zookeeper.escheduler.lock.failover.masters=${mastersFailover}#g" conf/zookeeper.properties
sedi "s#zookeeper.escheduler.lock.failover.workers.*#zookeeper.escheduler.lock.failover.workers=${workersFailover}#g" conf/zookeeper.properties
sedi "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/zookeeper.properties
sedi "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/zookeeper.properties
sedi "s#zookeeper.retry.sleep.*#zookeeper.retry.sleep=${zkRetrySleep}#g" conf/zookeeper.properties
sedi "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/zookeeper.properties

# master
sedi "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/master.properties
sedi "s#master.exec.task.number.*#master.exec.task.number=${masterExecTaskNum}#g" conf/master.properties
sedi "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/master.properties
sedi "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/master.properties
sedi "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/master.properties
sedi "s#master.max.cpuload.avg.*#master.max.cpuload.avg=${masterMaxCupLoadAvg}#g" conf/master.properties
sedi "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/master.properties

# worker
sedi "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/worker.properties
sedi "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/worker.properties
sedi "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/worker.properties
sedi "s#worker.max.cpuload.avg.*#worker.max.cpuload.avg=${workerMaxCupLoadAvg}#g" conf/worker.properties
sedi "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/worker.properties

# api
sedi "s#server.port.*#server.port=${apiServerPort}#g" conf/application.properties
sedi "s#server.session.timeout.*#server.session.timeout=${apiServerSessionTimeout}#g" conf/application.properties
sedi "s#server.context-path.*#server.context-path=${apiServerContextPath}#g" conf/application.properties
sedi "s#spring.http.multipart.max-file-size.*#spring.http.multipart.max-file-size=${springMaxFileSize}#g" conf/application.properties
sedi "s#spring.http.multipart.max-request-size.*#spring.http.multipart.max-request-size=${springMaxRequestSize}#g" conf/application.properties
sedi "s#server.max-http-post-size.*#server.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application.properties

# alert
sedi "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
sedi "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
sedi "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
sedi "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
sedi "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
sedi "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties

# install / run config
sedi "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
sedi "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
sedi "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
sedi "s#masters.*#masters=${masters}#g" conf/config/run_config.conf
sedi "s#workers.*#workers=${workers}#g" conf/config/run_config.conf
sedi "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf
sedi "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf
# 2. Create the install directory locally and the working directories on
#    every target host (as root via sudo, then chown to the deploy user).
echo "2,创建目录"

if [ ! -d "$installPath" ]; then
  sudo mkdir -p "$installPath"
  sudo chown -R "$deployUser:$deployUser" "$installPath"
fi

# Split the comma-separated host list into an array (hosts contain no
# whitespace or glob characters, so word-splitting here is safe).
hostsArr=(${ips//,/ })
for host in "${hostsArr[@]}"; do
  # Each of these paths is created on the remote host only when missing.
  for remoteDir in "$programPath" "$downloadPath" "$execPath" "$xlsFilePath"; do
    if ! ssh "$host" test -e "$remoteDir"; then
      ssh "$host" "sudo mkdir -p $remoteDir; sudo chown -R $deployUser:$deployUser $remoteDir"
    fi
  done
done
# 3. Stop all running services.
echo "3,停止服务"
sh "${workDir}/script/stop_all.sh"

# 4. Remove stale zookeeper nodes so the restarted cluster registers cleanly.
echo "4,删除zk节点"
sleep 1
python "${workDir}/script/del_zk_node.py" "$zkQuorum" "$zkRoot"

# 5. Copy the distribution to every host; abort the deploy if the copy fails.
echo "5,scp资源"
if sh "${workDir}/script/scp_hosts.sh"; then
  echo 'scp拷贝完成'
else
  # fixed: message previously read 'sc 拷贝失败退出'
  echo 'scp 拷贝失败退出'
  # fixed: 'exit -1' is not a valid POSIX exit status (must be 0-255)
  exit 1
fi

# 6. Start all services back up.
echo "6,启动"
sh "${workDir}/script/start_all.sh"