
[Fix-5570][Worker] Fix worker.groups in worker.properties is still commented and improve install scripts and configurations (#5571)

Shiwen Cheng 3 years ago
parent commit a5a0c7c5f8

+ 2 - 8
docker/build/conf/dolphinscheduler/datasource.properties.tpl

@@ -15,19 +15,13 @@
 # limitations under the License.
 #
 
-# db
+# datasource configuration
 spring.datasource.driver-class-name=${DATABASE_DRIVER}
 spring.datasource.url=jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}
 spring.datasource.username=${DATABASE_USERNAME}
 spring.datasource.password=${DATABASE_PASSWORD}
 
-# postgresql
-#spring.datasource.driver-class-name=org.postgresql.Driver
-#spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
-#spring.datasource.username=root
-#spring.datasource.password=root
-
-# mysql
+# mysql example
 #spring.datasource.driver-class-name=com.mysql.jdbc.Driver
 #spring.datasource.url=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
 #spring.datasource.username=ds_user
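
The new URL line in this template relies on shell parameter expansion: ${DATABASE_PARAMS:+?${DATABASE_PARAMS}} appends "?" plus the extra parameters only when DATABASE_PARAMS is non-empty. A minimal sketch of that behavior, assuming the template is rendered by a shell (values are illustrative):

    DATABASE_TYPE=postgresql DATABASE_HOST=db DATABASE_PORT=5432 DATABASE_DATABASE=dolphinscheduler
    DATABASE_PARAMS=""    # empty, so no "?" suffix is appended
    echo "jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}"
    # jdbc:postgresql://db:5432/dolphinscheduler
    DATABASE_PARAMS="characterEncoding=UTF-8"
    echo "jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}"
    # jdbc:postgresql://db:5432/dolphinscheduler?characterEncoding=UTF-8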

+ 2 - 2
dolphinscheduler-dao/src/main/resources/datasource.properties

@@ -15,13 +15,13 @@
 # limitations under the License.
 #
 
-# postgresql
+# datasource configuration
 spring.datasource.driver-class-name=org.postgresql.Driver
 spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
 spring.datasource.username=root
 spring.datasource.password=root
 
-# mysql
+# mysql example
 #spring.datasource.driver-class-name=com.mysql.jdbc.Driver
 #spring.datasource.url=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
 #spring.datasource.username=ds_user

+ 14 - 4
dolphinscheduler-server/src/main/resources/config/install_config.conf

@@ -53,9 +53,15 @@ deployUser="dolphinscheduler"
 # Note: find and load the Alert Plugin Jar from this dir.
 alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert"
 
+# user data local directory path, please make sure the directory exists and has read and write permissions
+dataBasedirPath="/tmp/dolphinscheduler"
+
 # resource storage type: HDFS, S3, NONE
 resourceStorageType="NONE"
 
+# resource storage path on HDFS/S3; resource files will be stored under this path, so please make sure the directory exists on HDFS/S3 and has read and write permissions. "/dolphinscheduler" is recommended
+resourceUploadPath="/dolphinscheduler"
+
 # if resourceStorageType is HDFS, set defaultFS to the namenode address; for HDFS HA, put core-site.xml and hdfs-site.xml in the conf directory.
 # if resourceStorageType is S3, set defaultFS to the S3 address, for example: s3a://dolphinscheduler
 # Note: for S3, be sure to create the root directory /dolphinscheduler
@@ -67,7 +73,7 @@ s3AccessKey="xxxxxxxxxx"
 s3SecretKey="xxxxxxxxxx"
 
 # resourcemanager port, the default value is 8088 if not specified
-resourceManagerHttpAddressPort=8088
+resourceManagerHttpAddressPort="8088"
 
 # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
 yarnHaIps="192.168.xx.xx,192.168.xx.xx"
@@ -75,9 +81,6 @@ yarnHaIps="192.168.xx.xx,192.168.xx.xx"
 # if resourcemanager HA is enabled or resourcemanager is not used, please keep the default value; if there is a single resourcemanager, you only need to replace ds1 with the actual resourcemanager hostname
 singleYarnIp="yarnIp1"
 
-# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
-resourceUploadPath="/dolphinscheduler"
-
 # user who has permission to create directories under the HDFS/S3 root path
 # Note: if kerberos is enabled, please config hdfsRootUser=
 hdfsRootUser="hdfs"
@@ -91,7 +94,14 @@ krb5ConfPath="$installPath/conf/krb5.conf"
 keytabUserName="hdfs-mycluster@ESZ.COM"
 # username keytab path
 keytabPath="$installPath/conf/hdfs.headless.keytab"
+# kerberos expire time, in hours
+kerberosExpireTime="2"
+
+# use sudo or not
+sudoEnable="true"
 
+# worker tenant auto create
+workerTenantAutoCreate="false"
 
 # api server port
 apiServerPort="12345"
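
Each key added to install_config.conf here is consumed by the updated sed rules in install.sh further down and mapped onto a properties key; a rough sketch of that mapping (see the install.sh hunk below):

    dataBasedirPath="/tmp/dolphinscheduler"    # -> data.basedir.path in conf/common.properties
    resourceUploadPath="/dolphinscheduler"     # -> resource.upload.path in conf/common.properties
    kerberosExpireTime="2"                     # -> kerberos.expire.time in conf/common.properties
    sudoEnable="true"                          # -> sudo.enable in conf/common.properties
    workerTenantAutoCreate="false"             # -> worker.tenant.auto.create in conf/worker.properties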

+ 1 - 1
dolphinscheduler-service/src/main/resources/zookeeper.properties

@@ -19,7 +19,7 @@
 zookeeper.quorum=localhost:2181
 
 # dolphinscheduler root directory
-#zookeeper.dolphinscheduler.root=/dolphinscheduler
+zookeeper.dolphinscheduler.root=/dolphinscheduler
 
 # dolphinscheduler failover directory
 #zookeeper.session.timeout=60000

+ 32 - 27
install.sh

@@ -34,33 +34,38 @@ datasourceDriverClassname="com.mysql.jdbc.Driver"
 if [ $dbtype == "postgresql" ];then
   datasourceDriverClassname="org.postgresql.Driver"
 fi
-sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=${datasourceDriverClassname}#g" conf/datasource.properties
-sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:${dbtype}://${dbhost}/${dbname}?characterEncoding=UTF-8\&allowMultiQueries=true#g" conf/datasource.properties
-sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/datasource.properties
-sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${password}#g" conf/datasource.properties
-
-sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common.properties
-sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common.properties
-sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/common.properties
-sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common.properties
-sed -i ${txt} "s#resource.manager.httpaddress.port.*#resource.manager.httpaddress.port=${resourceManagerHttpAddressPort}#g" conf/common.properties
-sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common.properties
-sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:%s/ws/v1/cluster/apps/%s#g" conf/common.properties
-sed -i ${txt} "s#yarn.job.history.status.address.*#yarn.job.history.status.address=http://${singleYarnIp}:19888/ws/v1/history/mapreduce/jobs/%s#g" conf/common.properties
-sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common.properties
-sed -i ${txt} "s#resource.upload.path.*#resource.upload.path=${resourceUploadPath}#g" conf/common.properties
-sed -i ${txt} "s#resource.storage.type.*#resource.storage.type=${resourceStorageType}#g" conf/common.properties
-sed -i ${txt} "s#hadoop.security.authentication.startup.state.*#hadoop.security.authentication.startup.state=${kerberosStartUp}#g" conf/common.properties
-sed -i ${txt} "s#java.security.krb5.conf.path.*#java.security.krb5.conf.path=${krb5ConfPath}#g" conf/common.properties
-sed -i ${txt} "s#login.user.keytab.username.*#login.user.keytab.username=${keytabUserName}#g" conf/common.properties
-sed -i ${txt} "s#login.user.keytab.path.*#login.user.keytab.path=${keytabPath}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
-sed -i ${txt} "s#\#zookeeper.dolphinscheduler.root.*#zookeeper.dolphinscheduler.root=${zkRoot}#g" conf/zookeeper.properties
-sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application-api.properties
-
-sed -i ${txt} "s#\#alert.plugin.dir.*#alert.plugin.dir=${alertPluginDir}#g" conf/alert.properties
-
-sed -i ${txt} "s#\#alert.listen.host.*#alert.listen.host=${alertServer}#g" conf/worker.properties
+sed -i ${txt} "s@^spring.datasource.driver-class-name=.*@spring.datasource.driver-class-name=${datasourceDriverClassname}@g" conf/datasource.properties
+sed -i ${txt} "s@^spring.datasource.url=.*@spring.datasource.url=jdbc:${dbtype}://${dbhost}/${dbname}?characterEncoding=UTF-8\&allowMultiQueries=true@g" conf/datasource.properties
+sed -i ${txt} "s@^spring.datasource.username=.*@spring.datasource.username=${username}@g" conf/datasource.properties
+sed -i ${txt} "s@^spring.datasource.password=.*@spring.datasource.password=${password}@g" conf/datasource.properties
+
+sed -i ${txt} "s@^#\?zookeeper.quorum=.*@zookeeper.quorum=${zkQuorum}@g" conf/zookeeper.properties
+sed -i ${txt} "s@^#\?zookeeper.dolphinscheduler.root=.*@zookeeper.dolphinscheduler.root=${zkRoot}@g" conf/zookeeper.properties
+
+sed -i ${txt} "s@^data.basedir.path=.*@data.basedir.path=${dataBasedirPath}@g" conf/common.properties
+sed -i ${txt} "s@^resource.storage.type=.*@resource.storage.type=${resourceStorageType}@g" conf/common.properties
+sed -i ${txt} "s@^resource.upload.path=.*@resource.upload.path=${resourceUploadPath}@g" conf/common.properties
+sed -i ${txt} "s@^hadoop.security.authentication.startup.state=.*@hadoop.security.authentication.startup.state=${kerberosStartUp}@g" conf/common.properties
+sed -i ${txt} "s@^java.security.krb5.conf.path=.*@java.security.krb5.conf.path=${krb5ConfPath}@g" conf/common.properties
+sed -i ${txt} "s@^login.user.keytab.username=.*@login.user.keytab.username=${keytabUserName}@g" conf/common.properties
+sed -i ${txt} "s@^login.user.keytab.path=.*@login.user.keytab.path=${keytabPath}@g" conf/common.properties
+sed -i ${txt} "s@^kerberos.expire.time=.*@kerberos.expire.time=${kerberosExpireTime}@g" conf/common.properties
+sed -i ${txt} "s@^hdfs.root.user=.*@hdfs.root.user=${hdfsRootUser}@g" conf/common.properties
+sed -i ${txt} "s@^fs.defaultFS=.*@fs.defaultFS=${defaultFS}@g" conf/common.properties
+sed -i ${txt} "s@^fs.s3a.endpoint=.*@fs.s3a.endpoint=${s3Endpoint}@g" conf/common.properties
+sed -i ${txt} "s@^fs.s3a.access.key=.*@fs.s3a.access.key=${s3AccessKey}@g" conf/common.properties
+sed -i ${txt} "s@^fs.s3a.secret.key=.*@fs.s3a.secret.key=${s3SecretKey}@g" conf/common.properties
+sed -i ${txt} "s@^resource.manager.httpaddress.port=.*@resource.manager.httpaddress.port=${resourceManagerHttpAddressPort}@g" conf/common.properties
+sed -i ${txt} "s@^yarn.resourcemanager.ha.rm.ids=.*@yarn.resourcemanager.ha.rm.ids=${yarnHaIps}@g" conf/common.properties
+sed -i ${txt} "s@^yarn.application.status.address=.*@yarn.application.status.address=http://${singleYarnIp}:%s/ws/v1/cluster/apps/%s@g" conf/common.properties
+sed -i ${txt} "s@^yarn.job.history.status.address=.*@yarn.job.history.status.address=http://${singleYarnIp}:19888/ws/v1/history/mapreduce/jobs/%s@g" conf/common.properties
+sed -i ${txt} "s@^sudo.enable=.*@sudo.enable=${sudoEnable}@g" conf/common.properties
+
+# the following configurations may be commented out, so add #\? to ensure sed still matches and replaces them
+sed -i ${txt} "s@^#\?worker.tenant.auto.create=.*@worker.tenant.auto.create=${workerTenantAutoCreate}@g" conf/worker.properties
+sed -i ${txt} "s@^#\?alert.listen.host=.*@alert.listen.host=${alertServer}@g" conf/worker.properties
+sed -i ${txt} "s@^#\?alert.plugin.dir=.*@alert.plugin.dir=${alertPluginDir}@g" conf/alert.properties
+sed -i ${txt} "s@^#\?server.port=.*@server.port=${apiServerPort}@g" conf/application-api.properties
 
 # 2.create directory
 echo "2.create directory"

+ 1 - 1
script/scp-hosts.sh

@@ -53,7 +53,7 @@ do
   do
     # if worker in workersGroupMap
     if [[ "${workersGroupMap[${host}]}" ]] && [[ "${dsDir}" == "conf" ]]; then
-      sed -i ${txt} "s:.*worker.groups.*:worker.groups=${workersGroupMap[${host}]}:g" ${dsDir}/worker.properties
+      sed -i ${txt} "s@^#\?worker.groups=.*@worker.groups=${workersGroupMap[${host}]}@g" ${dsDir}/worker.properties
     fi
 
     echo "start to scp $dsDir to $host/$installPath"