---
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
  20. # -- World time and date for cities in all time zones
  21. timezone: "Asia/Shanghai"
  22. # -- Used to detect whether dolphinscheduler dependent services such as database are ready
  23. initImage:
  24. # -- Image pull policy. Options: Always, Never, IfNotPresent
  25. pullPolicy: "IfNotPresent"
  26. # -- Specify initImage repository
  27. busybox: "busybox:1.30.1"
  28. image:
  29. # -- Docker image repository for the DolphinScheduler
  30. registry: apache/dolphinscheduler
  31. # -- Docker image version for the DolphinScheduler
  32. tag: 3.2.1
  33. # -- Image pull policy. Options: Always, Never, IfNotPresent
  34. pullPolicy: "IfNotPresent"
  35. # -- Specify a imagePullSecrets
  36. pullSecret: ""
  37. # -- master image
  38. master: dolphinscheduler-master
  39. # -- worker image
  40. worker: dolphinscheduler-worker
  41. # -- api-server image
  42. api: dolphinscheduler-api
  43. # -- alert-server image
  44. alert: dolphinscheduler-alert-server
  45. # -- tools image
  46. tools: dolphinscheduler-tools
  47. postgresql:
  48. # -- If not exists external PostgreSQL, by default, the DolphinScheduler will use a internal PostgreSQL
  49. enabled: true
  50. # -- The username for internal PostgreSQL
  51. postgresqlUsername: "root"
  52. # -- The password for internal PostgreSQL
  53. postgresqlPassword: "root"
  54. # -- The database for internal PostgreSQL
  55. postgresqlDatabase: "dolphinscheduler"
  56. # -- The driverClassName for internal PostgreSQL
  57. driverClassName: "org.postgresql.Driver"
  58. # -- The params for internal PostgreSQL
  59. params: "characterEncoding=utf8"
  60. persistence:
  61. # -- Set postgresql.persistence.enabled to true to mount a new volume for internal PostgreSQL
  62. enabled: false
  63. # -- `PersistentVolumeClaim` size
  64. size: "20Gi"
  65. # -- PostgreSQL data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
  66. storageClass: "-"
  67. mysql:
  68. # -- If not exists external MySQL, by default, the DolphinScheduler will use a internal MySQL
  69. enabled: false
  70. # -- mysql driverClassName
  71. driverClassName: "com.mysql.cj.jdbc.Driver"
  72. auth:
  73. # -- mysql username
  74. username: "ds"
  75. # -- mysql password
  76. password: "ds"
  77. # -- mysql database
  78. database: "dolphinscheduler"
  79. # -- mysql params
  80. params: "characterEncoding=utf8"
  81. primary:
  82. persistence:
  83. # -- Set mysql.primary.persistence.enabled to true to mount a new volume for internal MySQL
  84. enabled: false
  85. # -- `PersistentVolumeClaim` size
  86. size: "20Gi"
  87. # -- MySQL data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
  88. storageClass: "-"
  89. minio:
  90. # -- Deploy minio and configure it as the default storage for DolphinScheduler, note this is for demo only, not for production.
  91. enabled: true
  92. auth:
  93. # -- minio username
  94. rootUser: minioadmin
  95. # -- minio password
  96. rootPassword: minioadmin
  97. persistence:
  98. # -- Set minio.persistence.enabled to true to mount a new volume for internal minio
  99. enabled: false
  100. # -- minio default buckets
  101. defaultBuckets: "dolphinscheduler"
  102. externalDatabase:
  103. # -- If exists external database, and set postgresql.enable value to false.
  104. # external database will be used, otherwise Dolphinscheduler's internal database will be used.
  105. enabled: false
  106. # -- The type of external database, supported types: postgresql, mysql
  107. type: "postgresql"
  108. # -- The host of external database
  109. host: "localhost"
  110. # -- The port of external database
  111. port: "5432"
  112. # -- The username of external database
  113. username: "root"
  114. # -- The password of external database
  115. password: "root"
  116. # -- The database of external database
  117. database: "dolphinscheduler"
  118. # -- The params of external database
  119. params: "characterEncoding=utf8"
  120. # -- The driverClassName of external database
  121. driverClassName: "org.postgresql.Driver"
  122. zookeeper:
  123. # -- If not exists external registry, the zookeeper registry will be used by default.
  124. enabled: true
  125. service:
  126. # -- The port of zookeeper
  127. port: 2181
  128. # -- A list of comma separated Four Letter Words commands to use
  129. fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  130. persistence:
  131. # -- Set `zookeeper.persistence.enabled` to true to mount a new volume for internal ZooKeeper
  132. enabled: false
  133. # -- PersistentVolumeClaim size
  134. size: "20Gi"
  135. # -- ZooKeeper data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
  136. storageClass: "-"
  137. registryEtcd:
  138. # -- If you want to use Etcd for your registry center, change this value to true. And set zookeeper.enabled to false
  139. enabled: false
  140. # -- Etcd endpoints
  141. endpoints: ""
  142. # -- Etcd namespace
  143. namespace: "dolphinscheduler"
  144. # -- Etcd user
  145. user: ""
  146. # -- Etcd passWord
  147. passWord: ""
  148. # -- Etcd authority
  149. authority: ""
  150. # Please create a new folder: deploy/kubernetes/dolphinscheduler/etcd-certs
  151. ssl:
  152. # -- If your Etcd server has configured with ssl, change this value to true. About certification files you can see [here](https://github.com/etcd-io/jetcd/blob/main/docs/SslConfig.md) for how to convert.
  153. enabled: false
  154. # -- CertFile file path
  155. certFile: "etcd-certs/ca.crt"
  156. # -- keyCertChainFile file path
  157. keyCertChainFile: "etcd-certs/client.crt"
  158. # -- keyFile file path
  159. keyFile: "etcd-certs/client.pem"
  160. registryJdbc:
  161. # -- If you want to use JDbc for your registry center, change this value to true. And set zookeeper.enabled and registryEtcd.enabled to false
  162. enabled: false
  163. # -- Used to schedule refresh the ephemeral data/ lock
  164. termRefreshInterval: 2s
  165. # -- Used to calculate the expire time
  166. termExpireTimes: 3
  167. hikariConfig:
  168. # -- Default use same Dolphinscheduler's database, if you want to use other database please change `enabled` to `true` and change other configs
  169. enabled: false
  170. # -- Default use same Dolphinscheduler's database if you don't change this value. If you set this value, Registry jdbc's database type will use it
  171. driverClassName: com.mysql.cj.jdbc.Driver
  172. # -- Default use same Dolphinscheduler's database if you don't change this value. If you set this value, Registry jdbc's database type will use it
  173. jdbcurl: jdbc:mysql://
  174. # -- Default use same Dolphinscheduler's database if you don't change this value. If you set this value, Registry jdbc's database type will use it
  175. username: ""
  176. # -- Default use same Dolphinscheduler's database if you don't change this value. If you set this value, Registry jdbc's database type will use it
  177. password: ""
  178. ## If exists external registry and set zookeeper.enable value to false, the external registry will be used.
  179. externalRegistry:
  180. # -- If exists external registry and set `zookeeper.enable` && `registryEtcd.enabled` && `registryJdbc.enabled` to false, specify the external registry plugin name
  181. registryPluginName: "zookeeper"
  182. # -- If exists external registry and set `zookeeper.enable` && `registryEtcd.enabled` && `registryJdbc.enabled` to false, specify the external registry servers
  183. registryServers: "127.0.0.1:2181"
  184. security:
  185. authentication:
  186. # -- Authentication types (supported types: PASSWORD,LDAP,CASDOOR_SSO)
  187. type: PASSWORD
  188. # IF you set type `LDAP`, below config will be effective
  189. ldap:
  190. # -- LDAP urls
  191. urls: ldap://ldap.forumsys.com:389/
  192. # -- LDAP base dn
  193. basedn: dc=example,dc=com
  194. # -- LDAP username
  195. username: cn=read-only-admin,dc=example,dc=com
  196. # -- LDAP password
  197. password: password
  198. user:
  199. # -- Admin user account when you log-in with LDAP
  200. admin: read-only-admin
  201. # -- LDAP user identity attribute
  202. identityattribute: uid
  203. # -- LDAP user email attribute
  204. emailattribute: mail
  205. # -- action when ldap user is not exist,default value: CREATE. Optional values include(CREATE,DENY)
  206. notexistaction: CREATE
  207. ssl:
  208. # -- LDAP ssl switch
  209. enable: false
  210. # -- LDAP jks file absolute path, do not change this value
  211. truststore: "/opt/ldapkeystore.jks"
  212. # -- LDAP jks file base64 content.
  213. # If you use macOS, please run `base64 -b 0 -i /path/to/your.jks`.
  214. # If you use Linux, please run `base64 -w 0 /path/to/your.jks`.
  215. # If you use Windows, please run `certutil -f -encode /path/to/your.jks`.
  216. # Then copy the base64 content to below field in one line
  217. jksbase64content: ""
  218. # -- LDAP jks password
  219. truststorepassword: ""
  220. conf:
  221. # -- auto restart, if true, all components will be restarted automatically after the common configuration is updated. if false, you need to restart the components manually. default is false
  222. auto: false
  223. # common configuration
  224. common:
  225. # -- user data local directory path, please make sure the directory exists and have read write permissions
  226. data.basedir.path: /tmp/dolphinscheduler
  227. # -- resource storage type: HDFS, S3, OSS, GCS, ABS, NONE
  228. resource.storage.type: S3
  229. # -- resource store on HDFS/S3 path, resource file will store to this base path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
  230. resource.storage.upload.base.path: /dolphinscheduler
  231. # -- The AWS access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  232. resource.aws.access.key.id: minioadmin
  233. # -- The AWS secret access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  234. resource.aws.secret.access.key: minioadmin
  235. # -- The AWS Region to use. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  236. resource.aws.region: ca-central-1
  237. # -- The name of the bucket. You need to create them by yourself. Otherwise, the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
  238. resource.aws.s3.bucket.name: dolphinscheduler
  239. # -- You need to set this parameter when private cloud s3. If S3 uses public cloud, you only need to set resource.aws.region or set to the endpoint of a public cloud such as S3.cn-north-1.amazonaws.com.cn
  240. resource.aws.s3.endpoint: http://minio:9000
  241. # -- alibaba cloud access key id, required if you set resource.storage.type=OSS
  242. resource.alibaba.cloud.access.key.id: <your-access-key-id>
  243. # -- alibaba cloud access key secret, required if you set resource.storage.type=OSS
  244. resource.alibaba.cloud.access.key.secret: <your-access-key-secret>
  245. # -- alibaba cloud region, required if you set resource.storage.type=OSS
  246. resource.alibaba.cloud.region: cn-hangzhou
  247. # -- oss bucket name, required if you set resource.storage.type=OSS
  248. resource.alibaba.cloud.oss.bucket.name: dolphinscheduler
  249. # -- oss bucket endpoint, required if you set resource.storage.type=OSS
  250. resource.alibaba.cloud.oss.endpoint: https://oss-cn-hangzhou.aliyuncs.com
  251. # -- azure storage account name, required if you set resource.storage.type=ABS
  252. resource.azure.client.id: minioadmin
  253. # -- azure storage account key, required if you set resource.storage.type=ABS
  254. resource.azure.client.secret: minioadmin
  255. # -- azure storage subId, required if you set resource.storage.type=ABS
  256. resource.azure.subId: minioadmin
  257. # -- azure storage tenantId, required if you set resource.storage.type=ABS
  258. resource.azure.tenant.id: minioadmin
  259. # -- if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
  260. resource.hdfs.root.user: hdfs
  261. # -- if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
  262. resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
  263. # -- whether to startup kerberos
  264. hadoop.security.authentication.startup.state: false
  265. # -- java.security.krb5.conf path
  266. java.security.krb5.conf.path: /opt/krb5.conf
  267. # -- login user from keytab username
  268. login.user.keytab.username: hdfs-mycluster@ESZ.COM
  269. # -- login user from keytab path
  270. login.user.keytab.path: /opt/hdfs.headless.keytab
  271. # -- kerberos expire time, the unit is hour
  272. kerberos.expire.time: 2
  273. # -- resourcemanager port, the default value is 8088 if not specified
  274. resource.manager.httpaddress.port: 8088
  275. # -- if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
  276. yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
  277. # -- if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
  278. yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
  279. # -- job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
  280. yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
  281. # -- datasource encryption enable
  282. datasource.encryption.enable: false
  283. # -- datasource encryption salt
  284. datasource.encryption.salt: '!@#$%^&*'
  285. # -- data quality option
  286. data-quality.jar.dir:
  287. # -- Whether hive SQL is executed in the same session
  288. support.hive.oneSession: false
  289. # -- use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
  290. sudo.enable: true
  291. # -- development state
  292. development.state: false
  293. # -- rpc port
  294. alert.rpc.port: 50052
  295. # -- set path of conda.sh
  296. conda.path: /opt/anaconda3/etc/profile.d/conda.sh
  297. # -- Task resource limit state
  298. task.resource.limit.state: false
  299. # -- mlflow task plugin preset repository
  300. ml.mlflow.preset_repository: https://github.com/apache/dolphinscheduler-mlflow
  301. # -- mlflow task plugin preset repository version
  302. ml.mlflow.preset_repository_version: "main"
  303. # -- way to collect applicationId: log, aop
  304. appId.collect: log
  305. common:
  306. ## Configmap
  307. configmap:
  308. # -- The jvm options for dolphinscheduler, suitable for all servers
  309. DOLPHINSCHEDULER_OPTS: ""
  310. # -- User data directory path, self configuration, please make sure the directory exists and have read write permissions
  311. DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
  312. # -- Resource store on HDFS/S3 path, please make sure the directory exists on hdfs and have read write permissions
  313. RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
  314. # dolphinscheduler env
  315. # -- Set `HADOOP_HOME` for DolphinScheduler's task environment
  316. HADOOP_HOME: "/opt/soft/hadoop"
  317. # -- Set `HADOOP_CONF_DIR` for DolphinScheduler's task environment
  318. HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
  319. # -- Set `SPARK_HOME` for DolphinScheduler's task environment
  320. SPARK_HOME: "/opt/soft/spark"
  321. # -- Set `PYTHON_LAUNCHER` for DolphinScheduler's task environment
  322. PYTHON_LAUNCHER: "/usr/bin/python/bin/python3"
  323. # -- Set `JAVA_HOME` for DolphinScheduler's task environment
  324. JAVA_HOME: "/opt/java/openjdk"
  325. # -- Set `HIVE_HOME` for DolphinScheduler's task environment
  326. HIVE_HOME: "/opt/soft/hive"
  327. # -- Set `FLINK_HOME` for DolphinScheduler's task environment
  328. FLINK_HOME: "/opt/soft/flink"
  329. # -- Set `DATAX_LAUNCHER` for DolphinScheduler's task environment
  330. DATAX_LAUNCHER: "/opt/soft/datax/bin/datax.py"
  331. ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  332. sharedStoragePersistence:
  333. # -- Set `common.sharedStoragePersistence.enabled` to `true` to mount a shared storage volume for Hadoop, Spark binary and etc
  334. enabled: false
  335. # -- The mount path for the shared storage volume
  336. mountPath: "/opt/soft"
  337. # -- `PersistentVolumeClaim` access modes, must be `ReadWriteMany`
  338. accessModes:
  339. - "ReadWriteMany"
  340. # -- Shared Storage persistent volume storage class, must support the access mode: ReadWriteMany
  341. storageClassName: "-"
  342. # -- `PersistentVolumeClaim` size
  343. storage: "20Gi"
  344. ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  345. fsFileResourcePersistence:
  346. # -- Set `common.fsFileResourcePersistence.enabled` to `true` to mount a new file resource volume for `api` and `worker`
  347. enabled: false
  348. # -- `PersistentVolumeClaim` access modes, must be `ReadWriteMany`
  349. accessModes:
  350. - "ReadWriteMany"
  351. # -- Resource persistent volume storage class, must support the access mode: `ReadWriteMany`
  352. storageClassName: "-"
  353. # -- `PersistentVolumeClaim` size
  354. storage: "20Gi"
  355. master:
  356. # -- Enable or disable the Master component
  357. enabled: true
  358. # -- PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  359. podManagementPolicy: "Parallel"
  360. # -- Replicas is the desired number of replicas of the given Template.
  361. replicas: "3"
  362. # -- You can use annotations to attach arbitrary non-identifying metadata to objects.
  363. # Clients such as tools and libraries can retrieve this metadata.
  364. annotations: {}
  365. # -- Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  366. # More info: [node-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
  367. affinity: {}
  368. # -- NodeSelector is a selector which must be true for the pod to fit on a node.
  369. # Selector which must match a node's labels for the pod to be scheduled on that node.
  370. # More info: [assign-pod-node](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
  371. nodeSelector: {}
  372. # -- Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  373. # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  374. tolerations: []
  375. # -- Compute Resources required by this container.
  376. # More info: [manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
  377. resources: {}
  378. # resources:
  379. # limits:
  380. # memory: "8Gi"
  381. # cpu: "4"
  382. # requests:
  383. # memory: "2Gi"
  384. # cpu: "500m"
  385. # -- Periodic probe of container liveness. Container will be restarted if the probe fails.
  386. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  387. livenessProbe:
  388. # -- Turn on and off liveness probe
  389. enabled: true
  390. # -- Delay before liveness probe is initiated
  391. initialDelaySeconds: "30"
  392. # -- How often to perform the probe
  393. periodSeconds: "30"
  394. # -- When the probe times out
  395. timeoutSeconds: "5"
  396. # -- Minimum consecutive failures for the probe
  397. failureThreshold: "3"
  398. # -- Minimum consecutive successes for the probe
  399. successThreshold: "1"
  400. # -- Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails.
  401. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  402. readinessProbe:
  403. # -- Turn on and off readiness probe
  404. enabled: true
  405. # -- Delay before readiness probe is initiated
  406. initialDelaySeconds: "30"
  407. # -- How often to perform the probe
  408. periodSeconds: "30"
  409. # -- When the probe times out
  410. timeoutSeconds: "5"
  411. # -- Minimum consecutive failures for the probe
  412. failureThreshold: "3"
  413. # -- Minimum consecutive successes for the probe
  414. successThreshold: "1"
  415. # -- PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  416. # The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  417. # Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  418. # A claim in this list takes precedence over any volumes in the template, with the same name.
  419. persistentVolumeClaim:
  420. # -- Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master`
  421. enabled: false
  422. # -- `PersistentVolumeClaim` access modes
  423. accessModes:
  424. - "ReadWriteOnce"
  425. # -- `Master` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
  426. storageClassName: "-"
  427. # -- `PersistentVolumeClaim` size
  428. storage: "20Gi"
  429. env:
  430. # -- The jvm options for master server
  431. JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
  432. # -- Master execute thread number to limit process instances
  433. MASTER_EXEC_THREADS: "100"
  434. # -- Master execute task number in parallel per process instance
  435. MASTER_EXEC_TASK_NUM: "20"
  436. # -- Master dispatch task number per batch
  437. MASTER_DISPATCH_TASK_NUM: "3"
  438. # -- Master host selector to select a suitable worker, optional values include Random, RoundRobin, LowerWeight
  439. MASTER_HOST_SELECTOR: "LowerWeight"
  440. # -- Master max heartbeat interval
  441. MASTER_MAX_HEARTBEAT_INTERVAL: "10s"
  442. # -- Master heartbeat error threshold
  443. MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
  444. # -- Master commit task retry times
  445. MASTER_TASK_COMMIT_RETRYTIMES: "5"
  446. # -- master commit task interval, the unit is second
  447. MASTER_TASK_COMMIT_INTERVAL: "1s"
  448. # -- master state wheel interval, the unit is second
  449. MASTER_STATE_WHEEL_INTERVAL: "5s"
  450. # -- If set true, will open master overload protection
  451. MASTER_SERVER_LOAD_PROTECTION_ENABLED: false
  452. # -- Master max cpu usage, when the master's cpu usage is smaller then this value, master server can execute workflow.
  453. MASTER_SERVER_LOAD_PROTECTION_MAX_CPU_USAGE_PERCENTAGE_THRESHOLDS: 0.7
  454. # -- Master max JVM memory usage , when the master's jvm memory usage is smaller then this value, master server can execute workflow.
  455. MASTER_SERVER_LOAD_PROTECTION_MAX_JVM_MEMORY_USAGE_PERCENTAGE_THRESHOLDS: 0.7
  456. # -- Master max System memory usage , when the master's system memory usage is smaller then this value, master server can execute workflow.
  457. MASTER_SERVER_LOAD_PROTECTION_MAX_SYSTEM_MEMORY_USAGE_PERCENTAGE_THRESHOLDS: 0.7
  458. # -- Master max disk usage , when the master's disk usage is smaller then this value, master server can execute workflow.
  459. MASTER_SERVER_LOAD_PROTECTION_MAX_DISK_USAGE_PERCENTAGE_THRESHOLDS: 0.7
  460. # -- Master failover interval, the unit is minute
  461. MASTER_FAILOVER_INTERVAL: "10m"
  462. # -- Master kill application when handle failover
  463. MASTER_KILL_APPLICATION_WHEN_HANDLE_FAILOVER: "true"
  464. service:
  465. # -- annotations may need to be set when want to scrapy metrics by prometheus but not install prometheus operator
  466. annotations: {}
  467. # -- serviceMonitor for prometheus operator
  468. serviceMonitor:
  469. # -- Enable or disable master serviceMonitor
  470. enabled: false
  471. # -- serviceMonitor.interval interval at which metrics should be scraped
  472. interval: 15s
  473. # -- serviceMonitor.path path of the metrics endpoint
  474. path: /actuator/prometheus
  475. # -- serviceMonitor.labels ServiceMonitor extra labels
  476. labels: {}
  477. # -- serviceMonitor.annotations ServiceMonitor annotations
  478. annotations: {}
  479. worker:
  480. # -- Enable or disable the Worker component
  481. enabled: true
  482. # -- PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  483. podManagementPolicy: "Parallel"
  484. # -- Replicas is the desired number of replicas of the given Template.
  485. replicas: "3"
  486. # -- You can use annotations to attach arbitrary non-identifying metadata to objects.
  487. # Clients such as tools and libraries can retrieve this metadata.
  488. annotations: {}
  489. # -- Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  490. # More info: [node-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
  491. affinity: {}
  492. # -- NodeSelector is a selector which must be true for the pod to fit on a node.
  493. # Selector which must match a node's labels for the pod to be scheduled on that node.
  494. # More info: [assign-pod-node](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
  495. nodeSelector: {}
  496. # -- Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  497. # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  498. tolerations: [ ]
  499. # -- Compute Resources required by this container.
  500. # More info: [manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
  501. resources: {}
  502. # resources:
  503. # limits:
  504. # memory: "8Gi"
  505. # cpu: "4"
  506. # requests:
  507. # memory: "2Gi"
  508. # cpu: "500m"
  509. # -- Periodic probe of container liveness. Container will be restarted if the probe fails.
  510. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  511. livenessProbe:
  512. # -- Turn on and off liveness probe
  513. enabled: true
  514. # -- Delay before liveness probe is initiated
  515. initialDelaySeconds: "30"
  516. # -- How often to perform the probe
  517. periodSeconds: "30"
  518. # -- When the probe times out
  519. timeoutSeconds: "5"
  520. # -- Minimum consecutive failures for the probe
  521. failureThreshold: "3"
  522. # -- Minimum consecutive successes for the probe
  523. successThreshold: "1"
  524. # -- Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails.
  525. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  526. readinessProbe:
  527. # -- Turn on and off readiness probe
  528. enabled: true
  529. # -- Delay before readiness probe is initiated
  530. initialDelaySeconds: "30"
  531. # -- How often to perform the probe
  532. periodSeconds: "30"
  533. # -- When the probe times out
  534. timeoutSeconds: "5"
  535. # -- Minimum consecutive failures for the probe
  536. failureThreshold: "3"
  537. # -- Minimum consecutive successes for the probe
  538. successThreshold: "1"
  # -- PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  # The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  # Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  # A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    # -- Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker`
    enabled: false
    ## dolphinscheduler data volume
    dataPersistentVolume:
      # -- Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker`
      enabled: false
      # -- `PersistentVolumeClaim` access modes
      accessModes:
        - "ReadWriteOnce"
      # -- `Worker` data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
      storageClassName: "-"
      # -- `PersistentVolumeClaim` size
      storage: "20Gi"
    ## dolphinscheduler logs volume
    logsPersistentVolume:
      # -- Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker`
      enabled: false
      # -- `PersistentVolumeClaim` access modes
      accessModes:
        - "ReadWriteOnce"
      # -- `Worker` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
      storageClassName: "-"
      # -- `PersistentVolumeClaim` size
      storage: "20Gi"
  env:
    # -- If set true, will open worker overload protection
    WORKER_SERVER_LOAD_PROTECTION_ENABLED: false
    # -- Worker max cpu usage, when the worker's cpu usage is smaller than this value, worker server can be dispatched tasks.
    WORKER_SERVER_LOAD_PROTECTION_MAX_CPU_USAGE_PERCENTAGE_THRESHOLDS: 0.7
    # -- Worker max jvm memory usage, when the worker's jvm memory usage is smaller than this value, worker server can be dispatched tasks.
    WORKER_SERVER_LOAD_PROTECTION_MAX_JVM_MEMORY_USAGE_PERCENTAGE_THRESHOLDS: 0.7
    # -- Worker max memory usage, when the worker's memory usage is smaller than this value, worker server can be dispatched tasks.
    WORKER_SERVER_LOAD_PROTECTION_MAX_SYSTEM_MEMORY_USAGE_PERCENTAGE_THRESHOLDS: 0.7
    # -- Worker max disk usage, when the worker's disk usage is smaller than this value, worker server can be dispatched tasks.
    WORKER_SERVER_LOAD_PROTECTION_MAX_DISK_USAGE_PERCENTAGE_THRESHOLDS: 0.7
    # -- Worker execute thread number to limit task instances
    WORKER_EXEC_THREADS: "100"
    # -- Worker heartbeat interval
    WORKER_MAX_HEARTBEAT_INTERVAL: "10s"
    # -- Worker host weight to dispatch tasks
    WORKER_HOST_WEIGHT: "100"
    # -- tenant corresponds to the user of the system, which is used by the worker to submit the job. If system does not have this user, it will be automatically created after the parameter worker.tenant.auto.create is true.
    WORKER_TENANT_CONFIG_AUTO_CREATE_TENANT_ENABLED: true
    # -- Scenes to be used for distributed users. For example, users created by FreeIpa are stored in LDAP. This parameter only applies to Linux. When this parameter is true, worker.tenant.auto.create has no effect and will not automatically create tenants.
    WORKER_TENANT_CONFIG_DISTRIBUTED_TENANT: false
    # -- If set true, will use worker bootstrap user as the tenant to execute task when the tenant is `default`;
    DEFAULT_TENANT_ENABLED: false
  keda:
    # -- Enable or disable the Keda component
    enabled: false
    # -- Keda namespace labels
    namespaceLabels: {}
    # -- How often KEDA polls the DolphinScheduler DB to report new scale requests to the HPA
    pollingInterval: 5
    # -- How many seconds KEDA will wait before scaling to zero.
    # Note that HPA has a separate cooldown period for scale-downs
    cooldownPeriod: 30
    # -- Minimum number of workers created by keda
    minReplicaCount: 0
    # -- Maximum number of workers created by keda
    maxReplicaCount: 3
    # -- Specify HPA related options
    advanced: {}
    # horizontalPodAutoscalerConfig:
    #   behavior:
    #     scaleDown:
    #       stabilizationWindowSeconds: 300
    #       policies:
    #         - type: Percent
    #           value: 100
    #           periodSeconds: 15
  service:
    # -- annotations may need to be set when you want to scrape metrics with Prometheus without installing the Prometheus operator
    annotations: {}
    # -- serviceMonitor for prometheus operator
    serviceMonitor:
      # -- Enable or disable worker serviceMonitor
      enabled: false
      # -- serviceMonitor.interval interval at which metrics should be scraped
      interval: 15s
      # -- serviceMonitor.path path of the metrics endpoint
      path: /actuator/prometheus
      # -- serviceMonitor.labels ServiceMonitor extra labels
      labels: {}
      # -- serviceMonitor.annotations ServiceMonitor annotations
      annotations: {}
  630. alert:
  631. # -- Enable or disable the Alert-Server component
  632. enabled: true
  633. # -- Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  634. replicas: 1
  635. # -- The deployment strategy to use to replace existing pods with new ones.
  636. strategy:
  637. # -- Type of deployment. Can be "Recreate" or "RollingUpdate"
  638. type: "RollingUpdate"
  639. rollingUpdate:
  640. # -- The maximum number of pods that can be scheduled above the desired number of pods
  641. maxSurge: "25%"
  642. # -- The maximum number of pods that can be unavailable during the update
  643. maxUnavailable: "25%"
  644. # -- You can use annotations to attach arbitrary non-identifying metadata to objects.
  645. # Clients such as tools and libraries can retrieve this metadata.
  646. annotations: {}
  647. # -- Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  648. # More info: [node-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
  649. affinity: {}
  650. # -- NodeSelector is a selector which must be true for the pod to fit on a node.
  651. # Selector which must match a node's labels for the pod to be scheduled on that node.
  652. # More info: [assign-pod-node](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
  653. nodeSelector: {}
  654. # -- Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  655. # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  656. tolerations: []
  657. # -- Compute Resources required by this container.
  658. # More info: [manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
  659. resources: {}
  660. # resources:
  661. # limits:
  662. # memory: "2Gi"
  663. # cpu: "1"
  664. # requests:
  665. # memory: "1Gi"
  666. # cpu: "500m"
  667. # -- Periodic probe of container liveness. Container will be restarted if the probe fails.
  668. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  669. livenessProbe:
  670. # -- Turn on and off liveness probe
  671. enabled: true
  672. # -- Delay before liveness probe is initiated
  673. initialDelaySeconds: "30"
  674. # -- How often to perform the probe
  675. periodSeconds: "30"
  676. # -- When the probe times out
  677. timeoutSeconds: "5"
  678. # -- Minimum consecutive failures for the probe
  679. failureThreshold: "3"
  680. # -- Minimum consecutive successes for the probe
  681. successThreshold: "1"
  682. # -- Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails.
  683. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  684. readinessProbe:
  685. # -- Turn on and off readiness probe
  686. enabled: true
  687. # -- Delay before readiness probe is initiated
  688. initialDelaySeconds: "30"
  689. # -- How often to perform the probe
  690. periodSeconds: "30"
  691. # -- When the probe times out
  692. timeoutSeconds: "5"
  693. # -- Minimum consecutive failures for the probe
  694. failureThreshold: "3"
  695. # -- Minimum consecutive successes for the probe
  696. successThreshold: "1"
  697. # -- PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  698. # More info: [persistentvolumeclaims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
  699. persistentVolumeClaim:
  700. # -- Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert`
  701. enabled: false
  702. # -- `PersistentVolumeClaim` access modes
  703. accessModes:
  704. - "ReadWriteOnce"
  705. # -- `Alert` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
  706. storageClassName: "-"
  707. # -- `PersistentVolumeClaim` size
  708. storage: "20Gi"
  709. env:
  710. # -- The jvm options for alert server
  711. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  712. service:
  713. # -- annotations may need to be set when want to scrapy metrics by prometheus but not install prometheus operator
  714. annotations: {}
  715. # -- serviceMonitor for prometheus operator
  716. serviceMonitor:
  717. # -- Enable or disable alert-server serviceMonitor
  718. enabled: false
  719. # -- serviceMonitor.interval interval at which metrics should be scraped
  720. interval: 15s
  721. # -- serviceMonitor.path path of the metrics endpoint
  722. path: /actuator/prometheus
  723. # -- serviceMonitor.labels ServiceMonitor extra labels
  724. labels: {}
  725. # -- serviceMonitor.annotations ServiceMonitor annotations
  726. annotations: {}
  727. api:
  728. # -- Enable or disable the API-Server component
  729. enabled: true
  730. # -- Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  731. replicas: "1"
  732. # -- The deployment strategy to use to replace existing pods with new ones.
  733. strategy:
  734. # -- Type of deployment. Can be "Recreate" or "RollingUpdate"
  735. type: "RollingUpdate"
  736. rollingUpdate:
  737. # -- The maximum number of pods that can be scheduled above the desired number of pods
  738. maxSurge: "25%"
  739. # -- The maximum number of pods that can be unavailable during the update
  740. maxUnavailable: "25%"
  741. # -- You can use annotations to attach arbitrary non-identifying metadata to objects.
  742. # Clients such as tools and libraries can retrieve this metadata.
  743. annotations: {}
  744. # -- Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  745. # More info: [node-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
  746. affinity: { }
  747. # -- NodeSelector is a selector which must be true for the pod to fit on a node.
  748. # Selector which must match a node's labels for the pod to be scheduled on that node.
  749. # More info: [assign-pod-node](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
  750. nodeSelector: { }
  751. # -- Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  752. # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  753. tolerations: [ ]
  754. # -- Compute Resources required by this container.
  755. # More info: [manage-resources-containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
  756. resources: {}
  757. # resources:
  758. # limits:
  759. # memory: "2Gi"
  760. # cpu: "1"
  761. # requests:
  762. # memory: "1Gi"
  763. # cpu: "500m"
  764. # -- Periodic probe of container liveness. Container will be restarted if the probe fails.
  765. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  766. livenessProbe:
  767. # -- Turn on and off liveness probe
  768. enabled: true
  769. # -- Delay before liveness probe is initiated
  770. initialDelaySeconds: "30"
  771. # -- How often to perform the probe
  772. periodSeconds: "30"
  773. # -- When the probe times out
  774. timeoutSeconds: "5"
  775. # -- Minimum consecutive failures for the probe
  776. failureThreshold: "3"
  777. # -- Minimum consecutive successes for the probe
  778. successThreshold: "1"
  779. # -- Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails.
  780. # More info: [container-probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
  781. readinessProbe:
  782. # -- Turn on and off readiness probe
  783. enabled: true
  784. # -- Delay before readiness probe is initiated
  785. initialDelaySeconds: "30"
  786. # -- How often to perform the probe
  787. periodSeconds: "30"
  788. # -- When the probe times out
  789. timeoutSeconds: "5"
  790. # -- Minimum consecutive failures for the probe
  791. failureThreshold: "3"
  792. # -- Minimum consecutive successes for the probe
  793. successThreshold: "1"
  794. # -- PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  795. # More info: [persistentvolumeclaims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
  796. persistentVolumeClaim:
  797. # -- Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api`
  798. enabled: false
  799. # -- `PersistentVolumeClaim` access modes
  800. accessModes:
  801. - "ReadWriteOnce"
  802. # -- `api` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning
  803. storageClassName: "-"
  804. # -- `PersistentVolumeClaim` size
  805. storage: "20Gi"
  806. service:
  807. # -- type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
  808. type: "ClusterIP"
  809. # -- clusterIP is the IP address of the service and is usually assigned randomly by the master
  810. clusterIP: ""
  811. # -- nodePort is the port on each node on which this api service is exposed when type=NodePort
  812. nodePort: ""
  813. # -- pythonNodePort is the port on each node on which this python api service is exposed when type=NodePort
  814. pythonNodePort: ""
  815. # -- externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
  816. externalIPs: []
  817. # -- externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
  818. externalName: ""
  819. # -- loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
  820. loadBalancerIP: ""
  821. # -- annotations may need to be set when service.type is LoadBalancer
  822. # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
  823. annotations: {}
  824. # -- serviceMonitor for prometheus operator
  825. serviceMonitor:
  826. # -- Enable or disable api-server serviceMonitor
  827. enabled: false
  828. # -- serviceMonitor.interval interval at which metrics should be scraped
  829. interval: 15s
  830. # -- serviceMonitor.path path of the metrics endpoint
  831. path: /dolphinscheduler/actuator/prometheus
  832. # -- serviceMonitor.labels ServiceMonitor extra labels
  833. labels: {}
  834. # -- serviceMonitor.annotations ServiceMonitor annotations
  835. annotations: {}
  836. env:
  837. # -- The jvm options for api server
  838. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  839. taskTypeFilter:
  840. # -- Enable or disable the task type filter.
  841. # If set to true, the API-Server will return tasks of a specific type set in api.taskTypeFilter.task
  842. # Note: This feature only filters tasks to return a specific type on the WebUI. However, you can still create any task that DolphinScheduler supports via the API.
  843. enabled: false
  844. # -- taskTypeFilter.taskType task type
  845. # -- ref: [task-type-config.yaml](https://github.com/apache/dolphinscheduler/blob/dev/dolphinscheduler-api/src/main/resources/task-type-config.yaml)
  846. task: {}
  847. # example task sets
  848. # universal:
  849. # - 'SQL'
  850. # cloud: []
  851. # logic: []
  852. # dataIntegration: []
  853. # dataQuality: []
  854. # machineLearning: []
  855. # other: []
  856. ingress:
  857. # -- Enable ingress
  858. enabled: false
  859. # -- Ingress host
  860. host: "dolphinscheduler.org"
  861. # -- Ingress path
  862. path: "/dolphinscheduler"
  863. # -- Ingress annotations
  864. annotations: {}
  865. tls:
  866. # -- Enable ingress tls
  867. enabled: false
  868. # -- Ingress tls secret name
  869. secretName: "dolphinscheduler-tls"