---
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
  20. timezone: "Asia/Shanghai"
  21. # Used to detect whether dolphinscheduler dependent services such as database are ready
  22. initImage:
  23. pullPolicy: "IfNotPresent"
  24. busybox: "busybox:1.30.1"
  25. image:
  26. registry: "dolphinscheduler.docker.scarf.sh/apache"
  27. tag: "dev-SNAPSHOT"
  28. pullPolicy: "IfNotPresent"
  29. pullSecret: ""
  30. master: dolphinscheduler-master
  31. worker: dolphinscheduler-worker
  32. api: dolphinscheduler-api
  33. alert: dolphinscheduler-alert-server
  34. tools: dolphinscheduler-tools
  35. ## If not exists external database, by default, Dolphinscheduler's database will use it.
  36. postgresql:
  37. enabled: true
  38. postgresqlUsername: "root"
  39. postgresqlPassword: "root"
  40. postgresqlDatabase: "dolphinscheduler"
  41. params: "characterEncoding=utf8"
  42. persistence:
  43. enabled: false
  44. size: "20Gi"
  45. storageClass: "-"
  46. mysql:
  47. enabled: false
  48. auth:
  49. username: "ds"
  50. password: "ds"
  51. database: "dolphinscheduler"
  52. params: "characterEncoding=utf8"
  53. primary:
  54. persistence:
  55. enabled: false
  56. size: "20Gi"
  57. storageClass: "-"
  58. minio:
  59. enabled: true
  60. auth:
  61. rootUser: minioadmin
  62. rootPassword: minioadmin
  63. persistence:
  64. enabled: false
  65. defaultBuckets: "dolphinscheduler"
  66. ## If exists external database, and set postgresql.enable value to false.
  67. ## external database will be used, otherwise Dolphinscheduler's database will be used.
  68. externalDatabase:
  69. enabled: false
  70. type: "postgresql"
  71. host: "localhost"
  72. port: "5432"
  73. username: "root"
  74. password: "root"
  75. database: "dolphinscheduler"
  76. params: "characterEncoding=utf8"
  77. ## If not exists external registry, the zookeeper registry will be used by default.
  78. zookeeper:
  79. enabled: true
  80. service:
  81. port: 2181
  82. fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  83. persistence:
  84. enabled: false
  85. size: "20Gi"
  86. storageClass: "-"
  87. etcd:
  88. enabled: false
  89. endpoints: ""
  90. namespace: "dolphinscheduler"
  91. user: ""
  92. passWord: ""
  93. authority: ""
  94. # Please create a new folder: deploy/kubernetes/dolphinscheduler/etcd-certs
  95. ssl:
  96. enabled: false
  97. certFile: "etcd-certs/ca.crt"
  98. keyCertChainFile: "etcd-certs/client.crt"
  99. keyFile: "etcd-certs/client.pem"
  100. ## If exists external registry and set zookeeper.enable value to false, the external registry will be used.
  101. externalRegistry:
  102. registryPluginName: "zookeeper"
  103. registryServers: "127.0.0.1:2181"
  104. security:
  105. authentication:
  106. type: PASSWORD
  107. ldap:
  108. urls: ldap://ldap.forumsys.com:389/
  109. basedn: dc=example,dc=com
  110. username: cn=read-only-admin,dc=example,dc=com
  111. password: password
  112. user:
  113. admin: read-only-admin
  114. identityattribute: uid
  115. emailattribute: mail
  116. notexistaction: CREATE
  117. ssl:
  118. enable: false
  119. # do not change this value
  120. truststore: "/opt/ldapkeystore.jks"
  121. # if you use macOS, please run `base64 -b 0 -i /path/to/your.jks`
  122. # if you use Linux, please run `base64 -w 0 /path/to/your.jks`
  123. # if you use Windows, please run `certutil -f -encode /path/to/your.jks`
  124. # Then copy the base64 content to below field in one line
  125. jksbase64content: ""
  126. truststorepassword: ""
  127. conf:
  128. common:
  129. # user data local directory path, please make sure the directory exists and have read write permissions
  130. data.basedir.path: /tmp/dolphinscheduler
  131. # resource storage type: HDFS, S3, OSS, GCS, ABS, NONE
  132. resource.storage.type: S3
  133. # resource store on HDFS/S3 path, resource file will store to this base path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
  134. resource.storage.upload.base.path: /dolphinscheduler
  135. # The AWS access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  136. resource.aws.access.key.id: minioadmin
  137. # The AWS secret access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  138. resource.aws.secret.access.key: minioadmin
  139. # The AWS Region to use. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  140. resource.aws.region: ca-central-1
  141. # The name of the bucket. You need to create them by yourself. Otherwise, the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
  142. resource.aws.s3.bucket.name: dolphinscheduler
  143. # You need to set this parameter when private cloud s3. If S3 uses public cloud, you only need to set resource.aws.region or set to the endpoint of a public cloud such as S3.cn-north-1.amazonaws.com.cn
  144. resource.aws.s3.endpoint: http://minio:9000
  145. # alibaba cloud access key id, required if you set resource.storage.type=OSS
  146. resource.alibaba.cloud.access.key.id: <your-access-key-id>
  147. # alibaba cloud access key secret, required if you set resource.storage.type=OSS
  148. resource.alibaba.cloud.access.key.secret: <your-access-key-secret>
  149. # alibaba cloud region, required if you set resource.storage.type=OSS
  150. resource.alibaba.cloud.region: cn-hangzhou
  151. # oss bucket name, required if you set resource.storage.type=OSS
  152. resource.alibaba.cloud.oss.bucket.name: dolphinscheduler
  153. # oss bucket endpoint, required if you set resource.storage.type=OSS
  154. resource.alibaba.cloud.oss.endpoint: https://oss-cn-hangzhou.aliyuncs.com
  155. # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
  156. resource.hdfs.root.user: hdfs
  157. # if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
  158. resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
  159. # whether to startup kerberos
  160. hadoop.security.authentication.startup.state: false
  161. # java.security.krb5.conf path
  162. java.security.krb5.conf.path: /opt/krb5.conf
  163. # login user from keytab username
  164. login.user.keytab.username: hdfs-mycluster@ESZ.COM
  165. # login user from keytab path
  166. login.user.keytab.path: /opt/hdfs.headless.keytab
  167. # kerberos expire time, the unit is hour
  168. kerberos.expire.time: 2
  169. # resourcemanager port, the default value is 8088 if not specified
  170. resource.manager.httpaddress.port: 8088
  171. # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
  172. yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
  173. # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
  174. yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
  175. # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
  176. yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
  177. # datasource encryption enable
  178. datasource.encryption.enable: false
  179. # datasource encryption salt
  180. datasource.encryption.salt: '!@#$%^&*'
  181. # data quality option
  182. data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
  183. # Whether hive SQL is executed in the same session
  184. support.hive.oneSession: false
  185. # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
  186. sudo.enable: true
  187. # development state
  188. development.state: false
  189. # rpc port
  190. alert.rpc.port: 50052
  191. # set path of conda.sh
  192. conda.path: /opt/anaconda3/etc/profile.d/conda.sh
  193. # Task resource limit state
  194. task.resource.limit.state: false
  195. # mlflow task plugin preset repository
  196. ml.mlflow.preset_repository: https://github.com/apache/dolphinscheduler-mlflow
  197. # mlflow task plugin preset repository version
  198. ml.mlflow.preset_repository_version: "main"
  199. # way to collect applicationId: log, aop
  200. appId.collect: log
  201. common:
  202. ## Configmap
  203. configmap:
  204. DOLPHINSCHEDULER_OPTS: ""
  205. DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
  206. RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
  207. # dolphinscheduler env
  208. HADOOP_HOME: "/opt/soft/hadoop"
  209. HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
  210. SPARK_HOME: "/opt/soft/spark"
  211. PYTHON_HOME: "/usr/bin/python"
  212. JAVA_HOME: "/opt/java/openjdk"
  213. HIVE_HOME: "/opt/soft/hive"
  214. FLINK_HOME: "/opt/soft/flink"
  215. DATAX_HOME: "/opt/soft/datax"
  216. ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  217. sharedStoragePersistence:
  218. enabled: false
  219. mountPath: "/opt/soft"
  220. accessModes:
  221. - "ReadWriteMany"
  222. ## storageClassName must support the access mode: ReadWriteMany
  223. storageClassName: "-"
  224. storage: "20Gi"
  225. ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  226. fsFileResourcePersistence:
  227. enabled: false
  228. accessModes:
  229. - "ReadWriteMany"
  230. ## storageClassName must support the access mode: ReadWriteMany
  231. storageClassName: "-"
  232. storage: "20Gi"
  233. master:
  234. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  235. podManagementPolicy: "Parallel"
  236. ## Replicas is the desired number of replicas of the given Template.
  237. replicas: "3"
  238. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  239. ## Clients such as tools and libraries can retrieve this metadata.
  240. annotations: {}
  241. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  242. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  243. affinity: {}
  244. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  245. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  246. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  247. nodeSelector: {}
  248. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  249. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  250. tolerations: []
  251. ## Compute Resources required by this container. Cannot be updated.
  252. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  253. resources: {}
  254. # resources:
  255. # limits:
  256. # memory: "8Gi"
  257. # cpu: "4"
  258. # requests:
  259. # memory: "2Gi"
  260. # cpu: "500m"
  261. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  262. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  263. livenessProbe:
  264. enabled: true
  265. initialDelaySeconds: "30"
  266. periodSeconds: "30"
  267. timeoutSeconds: "5"
  268. failureThreshold: "3"
  269. successThreshold: "1"
  270. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  271. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  272. readinessProbe:
  273. enabled: true
  274. initialDelaySeconds: "30"
  275. periodSeconds: "30"
  276. timeoutSeconds: "5"
  277. failureThreshold: "3"
  278. successThreshold: "1"
  279. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  280. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  281. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  282. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  283. persistentVolumeClaim:
  284. enabled: false
  285. accessModes:
  286. - "ReadWriteOnce"
  287. storageClassName: "-"
  288. storage: "20Gi"
  289. env:
  290. JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
  291. MASTER_EXEC_THREADS: "100"
  292. MASTER_EXEC_TASK_NUM: "20"
  293. MASTER_DISPATCH_TASK_NUM: "3"
  294. MASTER_HOST_SELECTOR: "LowerWeight"
  295. MASTER_HEARTBEAT_INTERVAL: "10s"
  296. MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
  297. MASTER_TASK_COMMIT_RETRYTIMES: "5"
  298. MASTER_TASK_COMMIT_INTERVAL: "1s"
  299. MASTER_STATE_WHEEL_INTERVAL: "5s"
  300. MASTER_MAX_CPU_LOAD_AVG: "1"
  301. MASTER_RESERVED_MEMORY: "0.3"
  302. MASTER_FAILOVER_INTERVAL: "10m"
  303. MASTER_KILL_APPLICATION_WHEN_HANDLE_FAILOVER: "true"
  304. service:
  305. # annotations may need to be set when want to scrapy metrics by prometheus but not install prometheus operator
  306. annotations: {}
  307. # serviceMonitor for prometheus operator
  308. serviceMonitor:
  309. # -- Enable or disable master serviceMonitor
  310. enabled: false
  311. # -- @param serviceMonitor.interval interval at which metrics should be scraped
  312. interval: 15s
  313. # -- @param serviceMonitor.path path of the metrics endpoint
  314. path: /actuator/prometheus
  315. # -- @param serviceMonitor.labels ServiceMonitor extra labels
  316. labels: {}
  317. # -- @param serviceMonitor.annotations ServiceMonitor annotations
  318. annotations: {}
  319. worker:
  320. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  321. podManagementPolicy: "Parallel"
  322. ## Replicas is the desired number of replicas of the given Template.
  323. replicas: "3"
  324. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  325. ## Clients such as tools and libraries can retrieve this metadata.
  326. annotations: {}
  327. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  328. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  329. affinity: {}
  330. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  331. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  332. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  333. nodeSelector: {}
  334. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  335. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  336. tolerations: []
  337. ## Compute Resources required by this container. Cannot be updated.
  338. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  339. resources: {}
  340. # resources:
  341. # limits:
  342. # memory: "8Gi"
  343. # cpu: "4"
  344. # requests:
  345. # memory: "2Gi"
  346. # cpu: "500m"
  347. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  348. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  349. livenessProbe:
  350. enabled: true
  351. initialDelaySeconds: "30"
  352. periodSeconds: "30"
  353. timeoutSeconds: "5"
  354. failureThreshold: "3"
  355. successThreshold: "1"
  356. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  357. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  358. readinessProbe:
  359. enabled: true
  360. initialDelaySeconds: "30"
  361. periodSeconds: "30"
  362. timeoutSeconds: "5"
  363. failureThreshold: "3"
  364. successThreshold: "1"
  365. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  366. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  367. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  368. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  369. persistentVolumeClaim:
  370. enabled: false
  371. ## dolphinscheduler data volume
  372. dataPersistentVolume:
  373. enabled: false
  374. accessModes:
  375. - "ReadWriteOnce"
  376. storageClassName: "-"
  377. storage: "20Gi"
  378. ## dolphinscheduler logs volume
  379. logsPersistentVolume:
  380. enabled: false
  381. accessModes:
  382. - "ReadWriteOnce"
  383. storageClassName: "-"
  384. storage: "20Gi"
  385. env:
  386. WORKER_MAX_CPU_LOAD_AVG: "1"
  387. WORKER_RESERVED_MEMORY: "0.3"
  388. WORKER_EXEC_THREADS: "100"
  389. WORKER_HEARTBEAT_INTERVAL: "10s"
  390. WORKER_HEART_ERROR_THRESHOLD: "5"
  391. WORKER_HOST_WEIGHT: "100"
  392. keda:
  393. enabled: false
  394. namespaceLabels: { }
  395. # How often KEDA polls the DolphinScheduler DB to report new scale requests to the HPA
  396. pollingInterval: 5
  397. # How many seconds KEDA will wait before scaling to zero.
  398. # Note that HPA has a separate cooldown period for scale-downs
  399. cooldownPeriod: 30
  400. # Minimum number of workers created by keda
  401. minReplicaCount: 0
  402. # Maximum number of workers created by keda
  403. maxReplicaCount: 3
  404. # Specify HPA related options
  405. advanced: { }
  406. # horizontalPodAutoscalerConfig:
  407. # behavior:
  408. # scaleDown:
  409. # stabilizationWindowSeconds: 300
  410. # policies:
  411. # - type: Percent
  412. # value: 100
  413. # periodSeconds: 15
  414. service:
  415. # annotations may need to be set when want to scrapy metrics by prometheus but not install prometheus operator
  416. annotations: {}
  417. # serviceMonitor for prometheus operator
  418. serviceMonitor:
  419. # -- Enable or disable worker serviceMonitor
  420. enabled: false
  421. # -- @param serviceMonitor.interval interval at which metrics should be scraped
  422. interval: 15s
  423. # -- @param serviceMonitor.path path of the metrics endpoint
  424. path: /actuator/prometheus
  425. # -- @param serviceMonitor.labels ServiceMonitor extra labels
  426. labels: {}
  427. # -- @param serviceMonitor.annotations ServiceMonitor annotations
  428. annotations: {}
  429. alert:
  430. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  431. replicas: 1
  432. ## The deployment strategy to use to replace existing pods with new ones.
  433. strategy:
  434. type: "RollingUpdate"
  435. rollingUpdate:
  436. maxSurge: "25%"
  437. maxUnavailable: "25%"
  438. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  439. ## Clients such as tools and libraries can retrieve this metadata.
  440. annotations: {}
  441. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  442. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  443. affinity: {}
  444. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  445. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  446. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  447. nodeSelector: {}
  448. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  449. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  450. tolerations: []
  451. ## Compute Resources required by this container. Cannot be updated.
  452. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  453. resources: {}
  454. # resources:
  455. # limits:
  456. # memory: "2Gi"
  457. # cpu: "1"
  458. # requests:
  459. # memory: "1Gi"
  460. # cpu: "500m"
  461. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  462. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  463. livenessProbe:
  464. enabled: true
  465. initialDelaySeconds: "30"
  466. periodSeconds: "30"
  467. timeoutSeconds: "5"
  468. failureThreshold: "3"
  469. successThreshold: "1"
  470. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  471. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  472. readinessProbe:
  473. enabled: true
  474. initialDelaySeconds: "30"
  475. periodSeconds: "30"
  476. timeoutSeconds: "5"
  477. failureThreshold: "3"
  478. successThreshold: "1"
  479. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  480. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  481. persistentVolumeClaim:
  482. enabled: false
  483. accessModes:
  484. - "ReadWriteOnce"
  485. storageClassName: "-"
  486. storage: "20Gi"
  487. env:
  488. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  489. service:
  490. # annotations may need to be set when want to scrapy metrics by prometheus but not install prometheus operator
  491. annotations: {}
  492. # serviceMonitor for prometheus operator
  493. serviceMonitor:
  494. # -- Enable or disable alert-server serviceMonitor
  495. enabled: false
  496. # -- @param serviceMonitor.interval interval at which metrics should be scraped
  497. interval: 15s
  498. # -- @param serviceMonitor.path path of the metrics endpoint
  499. path: /actuator/prometheus
  500. # -- @param serviceMonitor.labels ServiceMonitor extra labels
  501. labels: {}
  502. # -- @param serviceMonitor.annotations ServiceMonitor annotations
  503. annotations: {}
  504. api:
  505. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  506. replicas: "1"
  507. ## The deployment strategy to use to replace existing pods with new ones.
  508. strategy:
  509. type: "RollingUpdate"
  510. rollingUpdate:
  511. maxSurge: "25%"
  512. maxUnavailable: "25%"
  513. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  514. ## Clients such as tools and libraries can retrieve this metadata.
  515. annotations: {}
  516. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  517. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  518. affinity: {}
  519. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  520. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  521. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  522. nodeSelector: {}
  523. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  524. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  525. tolerations: []
  526. ## Compute Resources required by this container. Cannot be updated.
  527. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  528. resources: {}
  529. # resources:
  530. # limits:
  531. # memory: "2Gi"
  532. # cpu: "1"
  533. # requests:
  534. # memory: "1Gi"
  535. # cpu: "500m"
  536. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  537. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  538. livenessProbe:
  539. enabled: true
  540. initialDelaySeconds: "30"
  541. periodSeconds: "30"
  542. timeoutSeconds: "5"
  543. failureThreshold: "3"
  544. successThreshold: "1"
  545. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  546. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  547. readinessProbe:
  548. enabled: true
  549. initialDelaySeconds: "30"
  550. periodSeconds: "30"
  551. timeoutSeconds: "5"
  552. failureThreshold: "3"
  553. successThreshold: "1"
  554. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  555. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  556. persistentVolumeClaim:
  557. enabled: false
  558. accessModes:
  559. - "ReadWriteOnce"
  560. storageClassName: "-"
  561. storage: "20Gi"
  562. service:
  563. ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
  564. type: "ClusterIP"
  565. ## clusterIP is the IP address of the service and is usually assigned randomly by the master
  566. clusterIP: ""
  567. ## nodePort is the port on each node on which this api service is exposed when type=NodePort
  568. nodePort: ""
  569. ## pythonNodePort is the port on each node on which this python api service is exposed when type=NodePort
  570. pythonNodePort: ""
  571. ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
  572. externalIPs: []
  573. ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
  574. externalName: ""
  575. ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
  576. loadBalancerIP: ""
  577. ## annotations may need to be set when service.type is LoadBalancer
  578. ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
  579. annotations: {}
  580. # serviceMonitor for prometheus operator
  581. serviceMonitor:
  582. # -- Enable or disable api-server serviceMonitor
  583. enabled: false
  584. # -- @param serviceMonitor.interval interval at which metrics should be scraped
  585. interval: 15s
  586. # -- @param serviceMonitor.path path of the metrics endpoint
  587. path: /dolphinscheduler/actuator/prometheus
  588. # -- @param serviceMonitor.labels ServiceMonitor extra labels
  589. labels: {}
  590. # -- @param serviceMonitor.annotations ServiceMonitor annotations
  591. annotations: {}
  592. env:
  593. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  594. ingress:
  595. enabled: false
  596. host: "dolphinscheduler.org"
  597. path: "/dolphinscheduler"
  598. annotations: {}
  599. tls:
  600. enabled: false
  601. secretName: "dolphinscheduler-tls"