---
# Default timezone applied to all DolphinScheduler components.
timezone: "Asia/Shanghai"

# Init-container image settings (busybox is used by init containers).
initImage:
  pullPolicy: "IfNotPresent"
  busybox: "busybox:1.30.1"

# DolphinScheduler component images.
image:
  registry: "dolphinscheduler.docker.scarf.sh/apache"
  tag: "dev-SNAPSHOT"
  pullPolicy: "IfNotPresent"
  # Name of an existing imagePullSecret; empty string means none.
  pullSecret: ""
  master: dolphinscheduler-master
  worker: dolphinscheduler-worker
  api: dolphinscheduler-api
  alert: dolphinscheduler-alert-server
  tools: dolphinscheduler-tools
# Bundled PostgreSQL sub-chart (used when externalDatabase.enabled is false).
postgresql:
  enabled: true
  postgresqlUsername: "root"
  postgresqlPassword: "root"
  postgresqlDatabase: "dolphinscheduler"
  driverClassName: "org.postgresql.Driver"
  params: "characterEncoding=utf8"
  persistence:
    enabled: false
    size: "20Gi"
    # "-" means do not request a dynamic provisioner.
    storageClass: "-"
# Bundled MySQL sub-chart (alternative to the bundled PostgreSQL; disabled by default).
mysql:
  enabled: false
  driverClassName: "com.mysql.cj.jdbc.Driver"
  auth:
    username: "ds"
    password: "ds"
    database: "dolphinscheduler"
    params: "characterEncoding=utf8"
  primary:
    persistence:
      enabled: false
      size: "20Gi"
      # "-" means do not request a dynamic provisioner.
      storageClass: "-"
# Bundled MinIO sub-chart, the default S3-compatible resource storage
# (matches conf.common resource.aws.* defaults below).
minio:
  enabled: true
  auth:
    rootUser: minioadmin
    rootPassword: minioadmin
  persistence:
    enabled: false
  defaultBuckets: "dolphinscheduler"
# External database connection; when enabled, the bundled postgresql/mysql
# sub-charts should be disabled.
externalDatabase:
  enabled: false
  type: "postgresql"
  host: "localhost"
  # Kept as a quoted string deliberately (consumed as an env/string value).
  port: "5432"
  username: "root"
  password: "root"
  database: "dolphinscheduler"
  params: "characterEncoding=utf8"
  driverClassName: "org.postgresql.Driver"
# Bundled ZooKeeper sub-chart, the default registry backend.
zookeeper:
  enabled: true
  service:
    port: 2181
  # Four-letter-word admin commands allowed over the client port.
  fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  persistence:
    enabled: false
    size: "20Gi"
    # "-" means do not request a dynamic provisioner.
    storageClass: "-"
# Etcd registry backend (alternative to ZooKeeper; disabled by default).
etcd:
  enabled: false
  # Comma-separated etcd endpoints, e.g. "http://etcd:2379".
  endpoints: ""
  namespace: "dolphinscheduler"
  user: ""
  passWord: ""
  authority: ""
  # TLS client settings for etcd.
  ssl:
    enabled: false
    certFile: "etcd-certs/ca.crt"
    keyCertChainFile: "etcd-certs/client.crt"
    keyFile: "etcd-certs/client.pem"
# External registry, used when both the bundled zookeeper and etcd are disabled.
externalRegistry:
  registryPluginName: "zookeeper"
  # Quoted: "host:port" would otherwise risk YAML 1.1 sexagesimal parsing.
  registryServers: "127.0.0.1:2181"
# Authentication settings for the API server.
security:
  authentication:
    # Authentication type: PASSWORD or LDAP.
    type: PASSWORD
    # LDAP settings, only used when type is LDAP.
    ldap:
      urls: ldap://ldap.forumsys.com:389/
      basedn: dc=example,dc=com
      username: cn=read-only-admin,dc=example,dc=com
      password: password
      user:
        admin: read-only-admin
        identityattribute: uid
        emailattribute: mail
        # Action when the LDAP user does not exist locally: CREATE or DENY.
        notexistaction: CREATE
      ssl:
        enable: false
        # Absolute path of the JKS truststore inside the container.
        truststore: "/opt/ldapkeystore.jks"
        # Base64-encoded content of the JKS file; mounted when non-empty.
        jksbase64content: ""
        truststorepassword: ""
# Shared common.properties configuration rendered into a ConfigMap.
conf:
  # If true, components restart automatically after this common configuration
  # is updated; if false, restart them manually.
  auto: false
  # Entries below map 1:1 onto common.properties keys.
  common:
    # User data local directory path; must exist with read/write permissions.
    data.basedir.path: /tmp/dolphinscheduler
    # Resource storage type: HDFS, S3, OSS, GCS, ABS, NONE.
    resource.storage.type: S3
    # Base path for uploaded resources in the chosen storage backend.
    resource.storage.upload.base.path: /dolphinscheduler
    # AWS/S3 credentials, required if resource.storage.type=S3
    # (defaults match the bundled MinIO above).
    resource.aws.access.key.id: minioadmin
    resource.aws.secret.access.key: minioadmin
    # AWS region, required if resource.storage.type=S3.
    resource.aws.region: ca-central-1
    # S3 bucket name; must exist before startup.
    resource.aws.s3.bucket.name: dolphinscheduler
    # S3 endpoint; required for private-cloud/S3-compatible storage.
    resource.aws.s3.endpoint: http://minio:9000
    # alibaba cloud access key id, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.access.key.id: <your-access-key-id>
    # alibaba cloud access key secret, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.access.key.secret: <your-access-key-secret>
    # alibaba cloud region, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.region: cn-hangzhou
    # oss bucket name, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.oss.bucket.name: dolphinscheduler
    # oss bucket endpoint, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.oss.endpoint: https://oss-cn-hangzhou.aliyuncs.com
    # azure storage account name, required if you set resource.storage.type=ABS
    resource.azure.client.id: minioadmin
    # azure storage account key, required if you set resource.storage.type=ABS
    resource.azure.client.secret: minioadmin
    # azure storage subId, required if you set resource.storage.type=ABS
    resource.azure.subId: minioadmin
    # azure storage tenantId, required if you set resource.storage.type=ABS
    resource.azure.tenant.id: minioadmin
    # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
    resource.hdfs.root.user: hdfs
    # if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
    resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
    # whether to startup kerberos
    hadoop.security.authentication.startup.state: false
    # java.security.krb5.conf path
    java.security.krb5.conf.path: /opt/krb5.conf
    # login user from keytab username
    login.user.keytab.username: hdfs-mycluster@ESZ.COM
    # login user from keytab path
    login.user.keytab.path: /opt/hdfs.headless.keytab
    # kerberos expire time, the unit is hour
    kerberos.expire.time: 2
    # resourcemanager port, the default value is 8088 if not specified
    resource.manager.httpaddress.port: 8088
    # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
    yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
    # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
    yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
    # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
    yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
    # datasource encryption enable
    datasource.encryption.enable: false
    # datasource encryption salt
    datasource.encryption.salt: '!@#$%^&*'
    # data quality option
    data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
    # Whether hive SQL is executed in the same session
    support.hive.oneSession: false
    # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
    sudo.enable: true
    # development state
    development.state: false
    # rpc port
    alert.rpc.port: 50052
    # set path of conda.sh
    conda.path: /opt/anaconda3/etc/profile.d/conda.sh
    # Task resource limit state
    task.resource.limit.state: false
    # mlflow task plugin preset repository
    ml.mlflow.preset_repository: https://github.com/apache/dolphinscheduler-mlflow
    # mlflow task plugin preset repository version
    ml.mlflow.preset_repository_version: "main"
    # way to collect applicationId: log, aop
    appId.collect: log
# Settings shared by api, master and worker pods.
common:
  # Environment variables rendered into the shared ConfigMap.
  configmap:
    DOLPHINSCHEDULER_OPTS: ""
    DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
    RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
    # dolphinscheduler env
    HADOOP_HOME: "/opt/soft/hadoop"
    HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
    SPARK_HOME: "/opt/soft/spark"
    PYTHON_LAUNCHER: "/usr/bin/python/bin/python3"
    JAVA_HOME: "/opt/java/openjdk"
    HIVE_HOME: "/opt/soft/hive"
    FLINK_HOME: "/opt/soft/flink"
    DATAX_LAUNCHER: "/opt/soft/datax/bin/datax.py"
  # Shared storage mounted into api, master and worker
  # (e.g. Hadoop/Spark/Flink/DataX binary packages).
  sharedStoragePersistence:
    enabled: false
    mountPath: "/opt/soft"
    accessModes:
      - "ReadWriteMany"
    # storageClassName must support the ReadWriteMany access mode.
    storageClassName: "-"
    storage: "20Gi"
  # Local-filesystem resource storage persistence.
  fsFileResourcePersistence:
    enabled: false
    accessModes:
      - "ReadWriteMany"
    # storageClassName must support the ReadWriteMany access mode.
    storageClassName: "-"
    storage: "20Gi"
# Master server (StatefulSet).
master:
  podManagementPolicy: "Parallel"
  replicas: "3"
  annotations: {}
  affinity: {}
  nodeSelector: {}
  tolerations: []
  resources: {}
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  # Environment variables for the master container; values are quoted
  # strings so the consumer receives them verbatim.
  env:
    JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
    MASTER_EXEC_THREADS: "100"
    MASTER_EXEC_TASK_NUM: "20"
    MASTER_DISPATCH_TASK_NUM: "3"
    MASTER_HOST_SELECTOR: "LowerWeight"
    MASTER_HEARTBEAT_INTERVAL: "10s"
    MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
    MASTER_TASK_COMMIT_RETRYTIMES: "5"
    MASTER_TASK_COMMIT_INTERVAL: "1s"
    MASTER_STATE_WHEEL_INTERVAL: "5s"
    MASTER_MAX_CPU_LOAD_AVG: "1"
    MASTER_RESERVED_MEMORY: "0.3"
    MASTER_FAILOVER_INTERVAL: "10m"
    MASTER_KILL_APPLICATION_WHEN_HANDLE_FAILOVER: "true"
  service:
    annotations: {}
    # Prometheus Operator ServiceMonitor for the master metrics endpoint.
    serviceMonitor:
      enabled: false
      interval: 15s
      path: /actuator/prometheus
      labels: {}
      annotations: {}
# Worker server (StatefulSet).
worker:
  podManagementPolicy: "Parallel"
  replicas: "3"
  annotations: {}
  affinity: {}
  nodeSelector: {}
  tolerations: []
  resources: {}
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  persistentVolumeClaim:
    enabled: false
    # Worker data volume.
    dataPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
    # Worker logs volume.
    logsPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
  # Environment variables for the worker container.
  env:
    WORKER_MAX_CPU_LOAD_AVG: "1"
    WORKER_RESERVED_MEMORY: "0.3"
    WORKER_EXEC_THREADS: "100"
    WORKER_HEARTBEAT_INTERVAL: "10s"
    WORKER_HEART_ERROR_THRESHOLD: "5"
    WORKER_HOST_WEIGHT: "100"
  # KEDA autoscaling for workers (disabled by default).
  keda:
    enabled: false
    namespaceLabels: {}
    pollingInterval: 5
    cooldownPeriod: 30
    minReplicaCount: 0
    maxReplicaCount: 3
    advanced: {}
  service:
    annotations: {}
    # Prometheus Operator ServiceMonitor for the worker metrics endpoint.
    serviceMonitor:
      enabled: false
      interval: 15s
      path: /actuator/prometheus
      labels: {}
      annotations: {}
# Alert server (Deployment).
alert:
  replicas: 1
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  annotations: {}
  affinity: {}
  nodeSelector: {}
  tolerations: []
  resources: {}
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  env:
    JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  service:
    annotations: {}
    # Prometheus Operator ServiceMonitor for the alert-server metrics endpoint.
    serviceMonitor:
      enabled: false
      interval: 15s
      path: /actuator/prometheus
      labels: {}
      annotations: {}
# API server (Deployment).
api:
  replicas: "1"
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  annotations: {}
  affinity: {}
  nodeSelector: {}
  tolerations: []
  resources: {}
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  service:
    # Service type: ClusterIP, NodePort, LoadBalancer or ExternalName.
    type: "ClusterIP"
    # Fixed cluster IP; empty lets Kubernetes assign one.
    clusterIP: ""
    # NodePort for the API port; only used when type is NodePort.
    nodePort: ""
    # NodePort for the python-gateway port; only used when type is NodePort.
    pythonNodePort: ""
    externalIPs: []
    # Only used when type is ExternalName.
    externalName: ""
    # Only used when type is LoadBalancer.
    loadBalancerIP: ""
    annotations: {}
    # Prometheus Operator ServiceMonitor for the api-server metrics endpoint.
    serviceMonitor:
      enabled: false
      interval: 15s
      path: /dolphinscheduler/actuator/prometheus
      labels: {}
      annotations: {}
  env:
    JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
# Ingress for the API server (disabled by default).
ingress:
  enabled: false
  host: "dolphinscheduler.org"
  path: "/dolphinscheduler"
  annotations: {}
  tls:
    enabled: false
    secretName: "dolphinscheduler-tls"