@@ -66,6 +66,82 @@ externalRegistry:
   registryPluginName: "zookeeper"
   registryServers: "127.0.0.1:2181"
 
+conf:
+  common:
+    # user data local directory path, please make sure the directory exists and has read/write permissions
+    data.basedir.path: /tmp/dolphinscheduler
+
+    # resource storage type: HDFS, S3, NONE
+    resource.storage.type: HDFS
+
+    # resource storage path on HDFS/S3; resource files will be stored under this path. Make sure the directory exists on HDFS/S3 and has read/write permissions. "/dolphinscheduler" is recommended
+    resource.upload.path: /dolphinscheduler
+
+    # whether to start up kerberos
+    hadoop.security.authentication.startup.state: false
+
+    # java.security.krb5.conf path
+    java.security.krb5.conf.path: /opt/krb5.conf
+
+    # login user from keytab username
+    login.user.keytab.username: hdfs-mycluster@ESZ.COM
+
+    # login user from keytab path
+    login.user.keytab.path: /opt/hdfs.headless.keytab
+
+    # kerberos expire time, in hours
+    kerberos.expire.time: 2
+    # resource view suffixes
+    #resource.view.suffixs: txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
+    # if resource.storage.type: HDFS, the user must have permission to create directories under the HDFS root path
+    hdfs.root.user: hdfs
+    # if resource.storage.type: S3, the value should look like s3a://dolphinscheduler; if resource.storage.type: HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
+    fs.defaultFS: file:///
+    aws.access.key.id: minioadmin
+    aws.secret.access.key: minioadmin
+    aws.region: us-east-1
+    aws.endpoint: http://localhost:9000
+    # resourcemanager port, the default value is 8088 if not specified
+    resource.manager.httpaddress.port: 8088
+    # if resourcemanager HA is enabled, set the HA IPs; if resourcemanager is single, keep this value empty
+    yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
+    # if resourcemanager HA is enabled or resourcemanager is not used, keep the default value; if resourcemanager is single, just replace ds1 with the actual resourcemanager hostname
+    yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
+    # job history status url when the application number threshold is reached (default 10000, maybe it was set to 1000)
+    yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
+
+    # datasource encryption enable
+    datasource.encryption.enable: false
+
+    # datasource encryption salt
+    datasource.encryption.salt: '!@#$%^&*'
+
+    # data quality option
+    data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
+
+    #data-quality.error.output.path: /tmp/data-quality-error-data
+
+    # whether hive SQL is executed in the same session
+    support.hive.oneSession: false
+
+    # use sudo or not; if true, the executing user is the tenant user and the deploy user needs sudo permissions; if false, the executing user is the deploy user and no sudo permissions are needed
+    sudo.enable: true
+
+    # network interface preferred, like eth0; default: empty
+    #dolphin.scheduler.network.interface.preferred:
+
+    # network IP gets priority, default: inner outer
+    #dolphin.scheduler.network.priority.strategy: default
+
+    # system env path
+    #dolphinscheduler.env.path: dolphinscheduler_env.sh
+    # development state
+    development.state: false
+    # rpc port
+    alert.rpc.port: 50052
+    # URL endpoint for the zeppelin RESTful API
+    zeppelin.rest.url: http://localhost:8080
+
 common:
   ## Configmap
   configmap:
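With the `conf.common` block above, these keys are set through chart values (presumably rendered into `common.properties` via the configmap that follows), so resource storage can be reconfigured per release instead of patching the file by hand. A minimal sketch of an override file switching resource storage to S3; the file name `values-s3.yaml`, the bucket URI, and the MinIO-style endpoint and credentials below are illustrative placeholders, not values from this patch:

```yaml
# values-s3.yaml -- hypothetical override file; bucket and credentials are placeholders
conf:
  common:
    # switch resource storage from the HDFS default to S3
    resource.storage.type: S3
    resource.upload.path: /dolphinscheduler
    # bucket URI; the bucket must already exist
    fs.defaultFS: s3a://dolphinscheduler
    aws.access.key.id: minioadmin
    aws.secret.access.key: minioadmin
    aws.region: us-east-1
    aws.endpoint: http://minio.example.com:9000
```

Applied with something like `helm install dolphinscheduler <chart-dir> -f values-s3.yaml`; any key not overridden keeps the default shown in the diff.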