# values.yaml

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

timezone: "Asia/Shanghai"

# Used to detect whether dolphinscheduler dependent services such as database are ready
initImage:
  pullPolicy: "IfNotPresent"
  busybox: "busybox:1.30.1"

image:
  registry: "dolphinscheduler.docker.scarf.sh/apache"
  tag: "dev-SNAPSHOT"
  pullPolicy: "IfNotPresent"
  pullSecret: ""
  master: dolphinscheduler-master
  worker: dolphinscheduler-worker
  api: dolphinscheduler-api
  alert: dolphinscheduler-alert-server
  tools: dolphinscheduler-tools
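# As an illustration (not part of the chart defaults; release name, tag and registry below
# are placeholders), the image coordinates above can also be overridden at install time
# with Helm's --set flag instead of editing this file:
#   helm install dolphinscheduler . \
#     --set image.registry=apache --set image.tag=3.2.1 --set image.pullPolicy=Always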
## If no external database is configured, DolphinScheduler will use this built-in PostgreSQL database by default.
postgresql:
  enabled: true
  postgresqlUsername: "root"
  postgresqlPassword: "root"
  postgresqlDatabase: "dolphinscheduler"
  driverClassName: "org.postgresql.Driver"
  params: "characterEncoding=utf8"
  persistence:
    enabled: false
    size: "20Gi"
    storageClass: "-"

mysql:
  enabled: false
  driverClassName: "com.mysql.cj.jdbc.Driver"
  auth:
    username: "ds"
    password: "ds"
    database: "dolphinscheduler"
    params: "characterEncoding=utf8"
  primary:
    persistence:
      enabled: false
      size: "20Gi"
      storageClass: "-"

minio:
  enabled: true
  auth:
    rootUser: minioadmin
    rootPassword: minioadmin
  persistence:
    enabled: false
  defaultBuckets: "dolphinscheduler"

## If an external database exists and postgresql.enabled is set to false,
## the external database will be used; otherwise DolphinScheduler's built-in database will be used.
externalDatabase:
  enabled: false
  type: "postgresql"
  host: "localhost"
  port: "5432"
  username: "root"
  password: "root"
  database: "dolphinscheduler"
  params: "characterEncoding=utf8"
  driverClassName: "org.postgresql.Driver"
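## Example (commented out; host, credentials and params are illustrative only): pointing
## DolphinScheduler at an external MySQL instead of the bundled PostgreSQL. Set
## postgresql.enabled and mysql.enabled to false, then enable this block with your own values:
# externalDatabase:
#   enabled: true
#   type: "mysql"
#   host: "mysql.example.com"
#   port: "3306"
#   username: "ds"
#   password: "ds"
#   database: "dolphinscheduler"
#   params: "characterEncoding=utf8"
#   driverClassName: "com.mysql.cj.jdbc.Driver"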
## If no external registry is configured, the built-in ZooKeeper registry will be used by default.
zookeeper:
  enabled: true
  service:
    port: 2181
  fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  persistence:
    enabled: false
    size: "20Gi"
    storageClass: "-"

registryEtcd:
  enabled: false
  endpoints: ""
  namespace: "dolphinscheduler"
  user: ""
  passWord: ""
  authority: ""
  # Please create a new folder: deploy/kubernetes/dolphinscheduler/etcd-certs
  ssl:
    enabled: false
    certFile: "etcd-certs/ca.crt"
    keyCertChainFile: "etcd-certs/client.crt"
    keyFile: "etcd-certs/client.pem"
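  # Example (a sketch; assumes your client certificates are already named as above): place the
  # etcd certificates in the chart directory mentioned above before installing, e.g.
  #   mkdir -p deploy/kubernetes/dolphinscheduler/etcd-certs
  #   cp ca.crt client.crt client.pem deploy/kubernetes/dolphinscheduler/etcd-certs/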
registryJdbc:
  enabled: false
  termRefreshInterval: 2s
  termExpireTimes: 3
  hikariConfig:
    # By default the registry reuses DolphinScheduler's own database. To use a different database,
    # change `enabled` to `true` and adjust the configs below.
    enabled: false
    driverClassName: com.mysql.cj.jdbc.Driver
    jdbcurl: jdbc:mysql://
    username: ""
    password: ""
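  # Example (commented; host and database are illustrative only): a typical MySQL JDBC URL
  # for the registry database if you enable hikariConfig above:
  #   jdbcurl: jdbc:mysql://mysql.example.com:3306/dolphinscheduler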
## If an external registry exists and zookeeper.enabled is set to false, the external registry will be used.
externalRegistry:
  registryPluginName: "zookeeper"
  registryServers: "127.0.0.1:2181"
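## Example (commented; hostnames are placeholders): using an existing ZooKeeper ensemble
## when zookeeper.enabled is set to false:
# externalRegistry:
#   registryPluginName: "zookeeper"
#   registryServers: "zk-0.example.com:2181,zk-1.example.com:2181,zk-2.example.com:2181"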
security:
  authentication:
    # Authentication types (supported types: PASSWORD, LDAP, CASDOOR_SSO)
    type: PASSWORD
    # If you set type to `LDAP`, the config below takes effect
    ldap:
      urls: ldap://ldap.forumsys.com:389/
      basedn: dc=example,dc=com
      username: cn=read-only-admin,dc=example,dc=com
      password: password
      user:
        admin: read-only-admin
        identityattribute: uid
        emailattribute: mail
        notexistaction: CREATE
      ssl:
        enable: false
        # do not change this value
        truststore: "/opt/ldapkeystore.jks"
        # if you use macOS, please run `base64 -b 0 -i /path/to/your.jks`
        # if you use Linux, please run `base64 -w 0 /path/to/your.jks`
        # if you use Windows, please run `certutil -f -encode /path/to/your.jks /path/to/your.jks.b64`
        # Then copy the base64 content into the field below as a single line
        jksbase64content: ""
        truststorepassword: ""
conf:
  # Auto restart: if true, all components will be restarted automatically after the common configuration is updated.
  # If false, you need to restart the components manually. Default is false.
  auto: false
  # common configuration
  common:
    # user data local directory path, please make sure the directory exists and has read/write permissions
    data.basedir.path: /tmp/dolphinscheduler
    # resource storage type: HDFS, S3, OSS, GCS, ABS, NONE
    resource.storage.type: S3
    # resource store base path on HDFS/S3; resource files will be stored under this path. Please make sure the directory exists and has read/write permissions. "/dolphinscheduler" is recommended.
    resource.storage.upload.base.path: /dolphinscheduler
    # The AWS access key. Required if resource.storage.type=S3 or the EMR task is used.
    resource.aws.access.key.id: minioadmin
    # The AWS secret access key. Required if resource.storage.type=S3 or the EMR task is used.
    resource.aws.secret.access.key: minioadmin
    # The AWS Region to use. Required if resource.storage.type=S3 or the EMR task is used.
    resource.aws.region: ca-central-1
    # The name of the bucket. You need to create it yourself, otherwise the system cannot start. All buckets in Amazon S3 share a single namespace; make sure the bucket name is unique.
    resource.aws.s3.bucket.name: dolphinscheduler
    # Required when using a private-cloud S3. For public-cloud S3 you only need to set resource.aws.region, or set this to the public-cloud endpoint, such as s3.cn-north-1.amazonaws.com.cn.
    resource.aws.s3.endpoint: http://minio:9000
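    # Example (commented out; credentials, region and bucket are placeholders): switching from
    # the bundled MinIO to public AWS S3, following the comments above (real credentials, a
    # globally unique bucket, and the public S3 endpoint for the chosen region):
    #   resource.aws.access.key.id: <your-aws-access-key-id>
    #   resource.aws.secret.access.key: <your-aws-secret-access-key>
    #   resource.aws.region: us-east-1
    #   resource.aws.s3.bucket.name: <your-unique-bucket-name>
    #   resource.aws.s3.endpoint: https://s3.us-east-1.amazonaws.com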
    # alibaba cloud access key id, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.access.key.id: <your-access-key-id>
    # alibaba cloud access key secret, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.access.key.secret: <your-access-key-secret>
    # alibaba cloud region, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.region: cn-hangzhou
    # oss bucket name, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.oss.bucket.name: dolphinscheduler
    # oss bucket endpoint, required if you set resource.storage.type=OSS
    resource.alibaba.cloud.oss.endpoint: https://oss-cn-hangzhou.aliyuncs.com
    # azure storage account name, required if you set resource.storage.type=ABS
    resource.azure.client.id: minioadmin
    # azure storage account key, required if you set resource.storage.type=ABS
    resource.azure.client.secret: minioadmin
    # azure storage subId, required if you set resource.storage.type=ABS
    resource.azure.subId: minioadmin
    # azure storage tenantId, required if you set resource.storage.type=ABS
    resource.azure.tenant.id: minioadmin
    # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
    resource.hdfs.root.user: hdfs
    # if resource.storage.type=S3, the value should look like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
    resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
    # whether to start up kerberos
    hadoop.security.authentication.startup.state: false
    # java.security.krb5.conf path
    java.security.krb5.conf.path: /opt/krb5.conf
    # login user from keytab username
    login.user.keytab.username: hdfs-mycluster@ESZ.COM
    # login user from keytab path
    login.user.keytab.path: /opt/hdfs.headless.keytab
    # kerberos expire time, the unit is hours
    kerberos.expire.time: 2
    # resourcemanager port, the default value is 8088 if not specified
    resource.manager.httpaddress.port: 8088
    # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
    yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
    # if resourcemanager HA is enabled or resourcemanager is not used, keep the default value; if resourcemanager is single, you only need to replace ds1 with the actual resourcemanager hostname
    yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
    # job history status url when the application number threshold is reached (default 10000, maybe it was set to 1000)
    yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
    # datasource encryption enable
    datasource.encryption.enable: false
    # datasource encryption salt
    datasource.encryption.salt: '!@#$%^&*'
    # data quality option
    data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
    # Whether hive SQL is executed in the same session
    support.hive.oneSession: false
    # use sudo or not; if set to true, the executing user is the tenant user and the deploy user needs sudo permissions; if set to false, the executing user is the deploy user and no sudo permissions are needed
    sudo.enable: true
    # development state
    development.state: false
    # rpc port
    alert.rpc.port: 50052
    # set path of conda.sh
    conda.path: /opt/anaconda3/etc/profile.d/conda.sh
    # Task resource limit state
    task.resource.limit.state: false
    # mlflow task plugin preset repository
    ml.mlflow.preset_repository: https://github.com/apache/dolphinscheduler-mlflow
    # mlflow task plugin preset repository version
    ml.mlflow.preset_repository_version: "main"
    # way to collect applicationId: log, aop
    appId.collect: log
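# Example (a sketch; resource names depend on your release name and the chart's naming templates):
# with conf.auto left at false, changes to conf.common only take effect after the components are
# restarted, e.g. after a helm upgrade:
#   helm upgrade <release> . -f values.yaml
#   kubectl rollout restart deployment <release>-dolphinscheduler-api <release>-dolphinscheduler-alert
#   kubectl rollout restart statefulset <release>-dolphinscheduler-master <release>-dolphinscheduler-worker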
common:
  ## Configmap
  configmap:
    DOLPHINSCHEDULER_OPTS: ""
    DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
    RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
    # dolphinscheduler env
    HADOOP_HOME: "/opt/soft/hadoop"
    HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
    SPARK_HOME: "/opt/soft/spark"
    PYTHON_LAUNCHER: "/usr/bin/python/bin/python3"
    JAVA_HOME: "/opt/java/openjdk"
    HIVE_HOME: "/opt/soft/hive"
    FLINK_HOME: "/opt/soft/flink"
    DATAX_LAUNCHER: "/opt/soft/datax/bin/datax.py"
  ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  sharedStoragePersistence:
    enabled: false
    mountPath: "/opt/soft"
    accessModes:
      - "ReadWriteMany"
    ## storageClassName must support the access mode: ReadWriteMany
    storageClassName: "-"
    storage: "20Gi"
  ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  fsFileResourcePersistence:
    enabled: false
    accessModes:
      - "ReadWriteMany"
    ## storageClassName must support the access mode: ReadWriteMany
    storageClassName: "-"
    storage: "20Gi"
master:
  ## Enable or disable the Master component
  enabled: true
  ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  podManagementPolicy: "Parallel"
  ## Replicas is the desired number of replicas of the given Template.
  replicas: "3"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, it constrains the pod's scheduling.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "8Gi"
  #     cpu: "4"
  #   requests:
  #     memory: "2Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template with the same name.
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  env:
    JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
    MASTER_EXEC_THREADS: "100"
    MASTER_EXEC_TASK_NUM: "20"
    MASTER_DISPATCH_TASK_NUM: "3"
    MASTER_HOST_SELECTOR: "LowerWeight"
    MASTER_HEARTBEAT_INTERVAL: "10s"
    MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
    MASTER_TASK_COMMIT_RETRYTIMES: "5"
    MASTER_TASK_COMMIT_INTERVAL: "1s"
    MASTER_STATE_WHEEL_INTERVAL: "5s"
    MASTER_MAX_CPU_LOAD_AVG: "1"
    MASTER_RESERVED_MEMORY: "0.3"
    MASTER_FAILOVER_INTERVAL: "10m"
    MASTER_KILL_APPLICATION_WHEN_HANDLE_FAILOVER: "true"
  service:
    # annotations may need to be set when you want Prometheus to scrape metrics without installing the Prometheus operator
    annotations: {}
    # serviceMonitor for prometheus operator
    serviceMonitor:
      # -- Enable or disable master serviceMonitor
      enabled: false
      # -- @param serviceMonitor.interval interval at which metrics should be scraped
      interval: 15s
      # -- @param serviceMonitor.path path of the metrics endpoint
      path: /actuator/prometheus
      # -- @param serviceMonitor.labels ServiceMonitor extra labels
      labels: {}
      # -- @param serviceMonitor.annotations ServiceMonitor annotations
      annotations: {}
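    # Example (commented, a sketch): if your Prometheus operator only picks up ServiceMonitors
    # carrying a particular label (kube-prometheus-stack selects by the `release` label of its
    # own install by default), add it under labels above, e.g.:
    #   labels:
    #     release: prometheus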
worker:
  ## Enable or disable the Worker component
  enabled: true
  ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  podManagementPolicy: "Parallel"
  ## Replicas is the desired number of replicas of the given Template.
  replicas: "3"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, it constrains the pod's scheduling.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "8Gi"
  #     cpu: "4"
  #   requests:
  #     memory: "2Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template with the same name.
  persistentVolumeClaim:
    enabled: false
    ## dolphinscheduler data volume
    dataPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
    ## dolphinscheduler logs volume
    logsPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
  env:
    WORKER_MAX_CPU_LOAD_AVG: "1"
    WORKER_RESERVED_MEMORY: "0.3"
    WORKER_EXEC_THREADS: "100"
    WORKER_HEARTBEAT_INTERVAL: "10s"
    WORKER_HEART_ERROR_THRESHOLD: "5"
    WORKER_HOST_WEIGHT: "100"
  keda:
    enabled: false
    namespaceLabels: { }
    # How often KEDA polls the DolphinScheduler DB to report new scale requests to the HPA
    pollingInterval: 5
    # How many seconds KEDA will wait before scaling to zero.
    # Note that HPA has a separate cooldown period for scale-downs
    cooldownPeriod: 30
    # Minimum number of workers created by keda
    minReplicaCount: 0
    # Maximum number of workers created by keda
    maxReplicaCount: 3
    # Specify HPA related options
    advanced: { }
    # horizontalPodAutoscalerConfig:
    #   behavior:
    #     scaleDown:
    #       stabilizationWindowSeconds: 300
    #       policies:
    #         - type: Percent
    #           value: 100
    #           periodSeconds: 15
  service:
    # annotations may need to be set when you want Prometheus to scrape metrics without installing the Prometheus operator
    annotations: {}
    # serviceMonitor for prometheus operator
    serviceMonitor:
      # -- Enable or disable worker serviceMonitor
      enabled: false
      # -- @param serviceMonitor.interval interval at which metrics should be scraped
      interval: 15s
      # -- @param serviceMonitor.path path of the metrics endpoint
      path: /actuator/prometheus
      # -- @param serviceMonitor.labels ServiceMonitor extra labels
      labels: {}
      # -- @param serviceMonitor.annotations ServiceMonitor annotations
      annotations: {}
alert:
  ## Enable or disable the Alert-Server component
  enabled: true
  ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  replicas: 1
  ## The deployment strategy to use to replace existing pods with new ones.
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, it constrains the pod's scheduling.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "2Gi"
  #     cpu: "1"
  #   requests:
  #     memory: "1Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  env:
    JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  service:
    # annotations may need to be set when you want Prometheus to scrape metrics without installing the Prometheus operator
    annotations: {}
    # serviceMonitor for prometheus operator
    serviceMonitor:
      # -- Enable or disable alert-server serviceMonitor
      enabled: false
      # -- @param serviceMonitor.interval interval at which metrics should be scraped
      interval: 15s
      # -- @param serviceMonitor.path path of the metrics endpoint
      path: /actuator/prometheus
      # -- @param serviceMonitor.labels ServiceMonitor extra labels
      labels: {}
      # -- @param serviceMonitor.annotations ServiceMonitor annotations
      annotations: {}
api:
  ## Enable or disable the API-Server component
  enabled: true
  ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  replicas: "1"
  ## The deployment strategy to use to replace existing pods with new ones.
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, it constrains the pod's scheduling.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "2Gi"
  #     cpu: "1"
  #   requests:
  #     memory: "1Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  service:
    ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
    type: "ClusterIP"
    ## clusterIP is the IP address of the service and is usually assigned randomly by the master
    clusterIP: ""
    ## nodePort is the port on each node on which this api service is exposed when type=NodePort
    nodePort: ""
    ## pythonNodePort is the port on each node on which this python api service is exposed when type=NodePort
    pythonNodePort: ""
    ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
    externalIPs: []
    ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
    externalName: ""
    ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
    loadBalancerIP: ""
    ## annotations may need to be set when service.type is LoadBalancer
    ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
    annotations: {}
    # serviceMonitor for prometheus operator
    serviceMonitor:
      # -- Enable or disable api-server serviceMonitor
      enabled: false
      # -- @param serviceMonitor.interval interval at which metrics should be scraped
      interval: 15s
      # -- @param serviceMonitor.path path of the metrics endpoint
      path: /dolphinscheduler/actuator/prometheus
      # -- @param serviceMonitor.labels ServiceMonitor extra labels
      labels: {}
      # -- @param serviceMonitor.annotations ServiceMonitor annotations
      annotations: {}
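  # Example (commented; the port value is illustrative and must fall in the cluster's NodePort range):
  # exposing the API on every node using the type and nodePort fields defined above:
  #   service:
  #     type: "NodePort"
  #     nodePort: "30012"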
  env:
    JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  taskTypeFilter:
    # -- Enable or disable the task type filter
    # -- If set to true, the API-Server will only return the task types listed in api.taskTypeFilter.task
    # -- Note: This feature only filters the task types shown on the WebUI. You can still create any task that DolphinScheduler supports via the API.
    enabled: false
    # -- @param taskTypeFilter.taskType task type
    # -- ref: https://github.com/apache/dolphinscheduler/blob/dev/dolphinscheduler-api/src/main/resources/task-type-config.yaml
    task: {}
    # example task sets
    # universal:
    #   - 'SQL'
    # cloud: []
    # logic: []
    # dataIntegration: []
    # dataQuality: []
    # machineLearning: []
    # other: []

ingress:
  enabled: false
  host: "dolphinscheduler.org"
  path: "/dolphinscheduler"
  annotations: {}
  tls:
    enabled: false
    secretName: "dolphinscheduler-tls"
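  # Example (a sketch; certificate and key file names and namespace are placeholders): the TLS
  # secret referenced above can be created from an existing certificate before enabling ingress TLS:
  #   kubectl create secret tls dolphinscheduler-tls --cert=tls.crt --key=tls.key -n <namespace>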