#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
  20. timezone: "Asia/Shanghai"
  21. image:
  22. registry: "dolphinscheduler.docker.scarf.sh/apache"
  23. tag: "dev-SNAPSHOT"
  24. pullPolicy: "IfNotPresent"
  25. pullSecret: ""
  26. master: dolphinscheduler-master
  27. worker: dolphinscheduler-worker
  28. api: dolphinscheduler-api
  29. alert: dolphinscheduler-alert-server
  30. tools: dolphinscheduler-tools
  31. ## If not exists external database, by default, Dolphinscheduler's database will use it.
  32. postgresql:
  33. enabled: true
  34. postgresqlUsername: "root"
  35. postgresqlPassword: "root"
  36. postgresqlDatabase: "dolphinscheduler"
  37. params: "characterEncoding=utf8"
  38. persistence:
  39. enabled: false
  40. size: "20Gi"
  41. storageClass: "-"
  42. mysql:
  43. enabled: false
  44. auth:
  45. username: "ds"
  46. password: "ds"
  47. database: "dolphinscheduler"
  48. params: "characterEncoding=utf8"
  49. primary:
  50. persistence:
  51. enabled: false
  52. size: "20Gi"
  53. storageClass: "-"
  54. minio:
  55. enabled: true
  56. auth:
  57. rootUser: minioadmin
  58. rootPassword: minioadmin
  59. persistence:
  60. enabled: false
  61. defaultBuckets: "dolphinscheduler"
  62. ## If exists external database, and set postgresql.enable value to false.
  63. ## external database will be used, otherwise Dolphinscheduler's database will be used.
  64. externalDatabase:
  65. enabled: false
  66. type: "postgresql"
  67. host: "localhost"
  68. port: "5432"
  69. username: "root"
  70. password: "root"
  71. database: "dolphinscheduler"
  72. params: "characterEncoding=utf8"
  73. ## If not exists external registry, the zookeeper registry will be used by default.
  74. zookeeper:
  75. enabled: true
  76. service:
  77. port: 2181
  78. fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  79. persistence:
  80. enabled: false
  81. size: "20Gi"
  82. storageClass: "-"
  83. ## If exists external registry and set zookeeper.enable value to false, the external registry will be used.
  84. externalRegistry:
  85. registryPluginDir: "lib/plugin/registry"
  86. registryPluginName: "zookeeper"
  87. registryServers: "127.0.0.1:2181"
  88. conf:
  89. common:
  90. # user data local directory path, please make sure the directory exists and have read write permissions
  91. data.basedir.path: /tmp/dolphinscheduler
  92. # resource storage type: HDFS, S3, NONE
  93. resource.storage.type: S3
  94. # resource store on HDFS/S3 path, resource file will store to this base path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
  95. resource.storage.upload.base.path: /dolphinscheduler
  96. # The AWS access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  97. resource.aws.access.key.id: minioadmin
  98. # The AWS secret access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  99. resource.aws.secret.access.key: minioadmin
  100. # The AWS Region to use. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  101. resource.aws.region: ca-central-1
  102. # The name of the bucket. You need to create them by yourself. Otherwise, the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
  103. resource.aws.s3.bucket.name: dolphinscheduler
  104. # You need to set this parameter when private cloud s3. If S3 uses public cloud, you only need to set resource.aws.region or set to the endpoint of a public cloud such as S3.cn-north-1.amazonaws.com.cn
  105. resource.aws.s3.endpoint: http://minio:9000
  106. # alibaba cloud access key id, required if you set resource.storage.type=OSS
  107. resource.alibaba.cloud.access.key.id: <your-access-key-id>
  108. # alibaba cloud access key secret, required if you set resource.storage.type=OSS
  109. resource.alibaba.cloud.access.key.secret: <your-access-key-secret>
  110. # alibaba cloud region, required if you set resource.storage.type=OSS
  111. resource.alibaba.cloud.region: cn-hangzhou
  112. # oss bucket name, required if you set resource.storage.type=OSS
  113. resource.alibaba.cloud.oss.bucket.name: dolphinscheduler
  114. # oss bucket endpoint, required if you set resource.storage.type=OSS
  115. resource.alibaba.cloud.oss.endpoint: https://oss-cn-hangzhou.aliyuncs.com
  116. # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
  117. resource.hdfs.root.user: hdfs
  118. # if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
  119. resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
  120. # whether to startup kerberos
  121. hadoop.security.authentication.startup.state: false
  122. # java.security.krb5.conf path
  123. java.security.krb5.conf.path: /opt/krb5.conf
  124. # login user from keytab username
  125. login.user.keytab.username: hdfs-mycluster@ESZ.COM
  126. # login user from keytab path
  127. login.user.keytab.path: /opt/hdfs.headless.keytab
  128. # kerberos expire time, the unit is hour
  129. kerberos.expire.time: 2
  130. # resourcemanager port, the default value is 8088 if not specified
  131. resource.manager.httpaddress.port: 8088
  132. # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
  133. yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
  134. # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
  135. yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
  136. # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
  137. yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
  138. # datasource encryption enable
  139. datasource.encryption.enable: false
  140. # datasource encryption salt
  141. datasource.encryption.salt: '!@#$%^&*'
  142. # data quality option
  143. data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
  144. # Whether hive SQL is executed in the same session
  145. support.hive.oneSession: false
  146. # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
  147. sudo.enable: true
  148. # development state
  149. development.state: false
  150. # rpc port
  151. alert.rpc.port: 50052
  152. # set path of conda.sh
  153. conda.path: /opt/anaconda3/etc/profile.d/conda.sh
  154. # Task resource limit state
  155. task.resource.limit.state: false
  156. # mlflow task plugin preset repository
  157. ml.mlflow.preset_repository: https://github.com/apache/dolphinscheduler-mlflow
  158. # mlflow task plugin preset repository version
  159. ml.mlflow.preset_repository_version: "main"
  160. # way to collect applicationId: log, aop
  161. appId.collect: log
  162. common:
  163. ## Configmap
  164. configmap:
  165. DOLPHINSCHEDULER_OPTS: ""
  166. DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
  167. RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
  168. # dolphinscheduler env
  169. HADOOP_HOME: "/opt/soft/hadoop"
  170. HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
  171. SPARK_HOME: "/opt/soft/spark"
  172. PYTHON_HOME: "/usr/bin/python"
  173. JAVA_HOME: "/opt/java/openjdk"
  174. HIVE_HOME: "/opt/soft/hive"
  175. FLINK_HOME: "/opt/soft/flink"
  176. DATAX_HOME: "/opt/soft/datax"
  177. ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  178. sharedStoragePersistence:
  179. enabled: false
  180. mountPath: "/opt/soft"
  181. accessModes:
  182. - "ReadWriteMany"
  183. ## storageClassName must support the access mode: ReadWriteMany
  184. storageClassName: "-"
  185. storage: "20Gi"
  186. ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  187. fsFileResourcePersistence:
  188. enabled: false
  189. accessModes:
  190. - "ReadWriteMany"
  191. ## storageClassName must support the access mode: ReadWriteMany
  192. storageClassName: "-"
  193. storage: "20Gi"
  194. master:
  195. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  196. podManagementPolicy: "Parallel"
  197. ## Replicas is the desired number of replicas of the given Template.
  198. replicas: "3"
  199. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  200. ## Clients such as tools and libraries can retrieve this metadata.
  201. annotations: {}
  202. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  203. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  204. affinity: {}
  205. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  206. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  207. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  208. nodeSelector: {}
  209. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  210. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  211. tolerations: []
  212. ## Compute Resources required by this container. Cannot be updated.
  213. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  214. resources: {}
  215. # resources:
  216. # limits:
  217. # memory: "8Gi"
  218. # cpu: "4"
  219. # requests:
  220. # memory: "2Gi"
  221. # cpu: "500m"
  222. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  223. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  224. livenessProbe:
  225. enabled: true
  226. initialDelaySeconds: "30"
  227. periodSeconds: "30"
  228. timeoutSeconds: "5"
  229. failureThreshold: "3"
  230. successThreshold: "1"
  231. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  232. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  233. readinessProbe:
  234. enabled: true
  235. initialDelaySeconds: "30"
  236. periodSeconds: "30"
  237. timeoutSeconds: "5"
  238. failureThreshold: "3"
  239. successThreshold: "1"
  240. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  241. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  242. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  243. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  244. persistentVolumeClaim:
  245. enabled: false
  246. accessModes:
  247. - "ReadWriteOnce"
  248. storageClassName: "-"
  249. storage: "20Gi"
  250. env:
  251. JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
  252. MASTER_EXEC_THREADS: "100"
  253. MASTER_EXEC_TASK_NUM: "20"
  254. MASTER_DISPATCH_TASK_NUM: "3"
  255. MASTER_HOST_SELECTOR: "LowerWeight"
  256. MASTER_HEARTBEAT_INTERVAL: "10s"
  257. MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
  258. MASTER_TASK_COMMIT_RETRYTIMES: "5"
  259. MASTER_TASK_COMMIT_INTERVAL: "1s"
  260. MASTER_STATE_WHEEL_INTERVAL: "5s"
  261. MASTER_MAX_CPU_LOAD_AVG: "-1"
  262. MASTER_RESERVED_MEMORY: "0.3"
  263. MASTER_FAILOVER_INTERVAL: "10m"
  264. MASTER_KILL_YARN_JOB_WHEN_HANDLE_FAILOVER: "true"
  265. worker:
  266. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  267. podManagementPolicy: "Parallel"
  268. ## Replicas is the desired number of replicas of the given Template.
  269. replicas: "3"
  270. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  271. ## Clients such as tools and libraries can retrieve this metadata.
  272. annotations: {}
  273. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  274. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  275. affinity: {}
  276. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  277. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  278. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  279. nodeSelector: {}
  280. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  281. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  282. tolerations: []
  283. ## Compute Resources required by this container. Cannot be updated.
  284. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  285. resources: {}
  286. # resources:
  287. # limits:
  288. # memory: "8Gi"
  289. # cpu: "4"
  290. # requests:
  291. # memory: "2Gi"
  292. # cpu: "500m"
  293. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  294. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  295. livenessProbe:
  296. enabled: true
  297. initialDelaySeconds: "30"
  298. periodSeconds: "30"
  299. timeoutSeconds: "5"
  300. failureThreshold: "3"
  301. successThreshold: "1"
  302. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  303. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  304. readinessProbe:
  305. enabled: true
  306. initialDelaySeconds: "30"
  307. periodSeconds: "30"
  308. timeoutSeconds: "5"
  309. failureThreshold: "3"
  310. successThreshold: "1"
  311. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  312. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  313. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  314. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  315. persistentVolumeClaim:
  316. enabled: false
  317. ## dolphinscheduler data volume
  318. dataPersistentVolume:
  319. enabled: false
  320. accessModes:
  321. - "ReadWriteOnce"
  322. storageClassName: "-"
  323. storage: "20Gi"
  324. ## dolphinscheduler logs volume
  325. logsPersistentVolume:
  326. enabled: false
  327. accessModes:
  328. - "ReadWriteOnce"
  329. storageClassName: "-"
  330. storage: "20Gi"
  331. env:
  332. WORKER_GROUPS_0: default
  333. WORKER_MAX_CPU_LOAD_AVG: "-1"
  334. WORKER_RESERVED_MEMORY: "0.3"
  335. WORKER_EXEC_THREADS: "100"
  336. WORKER_HEARTBEAT_INTERVAL: "10s"
  337. WORKER_HEART_ERROR_THRESHOLD: "5"
  338. WORKER_HOST_WEIGHT: "100"
  339. WORKER_GROUPS: "default"
  340. keda:
  341. enabled: false
  342. namespaceLabels: { }
  343. # How often KEDA polls the DolphinScheduler DB to report new scale requests to the HPA
  344. pollingInterval: 5
  345. # How many seconds KEDA will wait before scaling to zero.
  346. # Note that HPA has a separate cooldown period for scale-downs
  347. cooldownPeriod: 30
  348. # Minimum number of workers created by keda
  349. minReplicaCount: 0
  350. # Maximum number of workers created by keda
  351. maxReplicaCount: 3
  352. # Specify HPA related options
  353. advanced: { }
  354. # horizontalPodAutoscalerConfig:
  355. # behavior:
  356. # scaleDown:
  357. # stabilizationWindowSeconds: 300
  358. # policies:
  359. # - type: Percent
  360. # value: 100
  361. # periodSeconds: 15
  362. alert:
  363. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  364. replicas: 1
  365. ## The deployment strategy to use to replace existing pods with new ones.
  366. strategy:
  367. type: "RollingUpdate"
  368. rollingUpdate:
  369. maxSurge: "25%"
  370. maxUnavailable: "25%"
  371. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  372. ## Clients such as tools and libraries can retrieve this metadata.
  373. annotations: {}
  374. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  375. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  376. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  377. affinity: {}
  378. ## Compute Resources required by this container. Cannot be updated.
  379. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  380. nodeSelector: {}
  381. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  382. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  383. tolerations: []
  384. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  385. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  386. resources: {}
  387. # resources:
  388. # limits:
  389. # memory: "2Gi"
  390. # cpu: "1"
  391. # requests:
  392. # memory: "1Gi"
  393. # cpu: "500m"
  394. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  395. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  396. livenessProbe:
  397. enabled: true
  398. initialDelaySeconds: "30"
  399. periodSeconds: "30"
  400. timeoutSeconds: "5"
  401. failureThreshold: "3"
  402. successThreshold: "1"
  403. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  404. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  405. readinessProbe:
  406. enabled: true
  407. initialDelaySeconds: "30"
  408. periodSeconds: "30"
  409. timeoutSeconds: "5"
  410. failureThreshold: "3"
  411. successThreshold: "1"
  412. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  413. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  414. persistentVolumeClaim:
  415. enabled: false
  416. accessModes:
  417. - "ReadWriteOnce"
  418. storageClassName: "-"
  419. storage: "20Gi"
  420. env:
  421. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  422. api:
  423. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  424. replicas: "1"
  425. ## The deployment strategy to use to replace existing pods with new ones.
  426. strategy:
  427. type: "RollingUpdate"
  428. rollingUpdate:
  429. maxSurge: "25%"
  430. maxUnavailable: "25%"
  431. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  432. ## Clients such as tools and libraries can retrieve this metadata.
  433. annotations: {}
  434. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  435. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  436. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  437. affinity: {}
  438. ## Compute Resources required by this container. Cannot be updated.
  439. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  440. nodeSelector: {}
  441. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  442. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  443. tolerations: []
  444. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  445. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  446. resources: {}
  447. # resources:
  448. # limits:
  449. # memory: "2Gi"
  450. # cpu: "1"
  451. # requests:
  452. # memory: "1Gi"
  453. # cpu: "500m"
  454. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  455. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  456. livenessProbe:
  457. enabled: true
  458. initialDelaySeconds: "30"
  459. periodSeconds: "30"
  460. timeoutSeconds: "5"
  461. failureThreshold: "3"
  462. successThreshold: "1"
  463. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  464. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  465. readinessProbe:
  466. enabled: true
  467. initialDelaySeconds: "30"
  468. periodSeconds: "30"
  469. timeoutSeconds: "5"
  470. failureThreshold: "3"
  471. successThreshold: "1"
  472. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  473. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  474. persistentVolumeClaim:
  475. enabled: false
  476. accessModes:
  477. - "ReadWriteOnce"
  478. storageClassName: "-"
  479. storage: "20Gi"
  480. service:
  481. ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
  482. type: "ClusterIP"
  483. ## clusterIP is the IP address of the service and is usually assigned randomly by the master
  484. clusterIP: ""
  485. ## nodePort is the port on each node on which this api service is exposed when type=NodePort
  486. nodePort: ""
  487. ## pythonNodePort is the port on each node on which this python api service is exposed when type=NodePort
  488. pythonNodePort: ""
  489. ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
  490. externalIPs: []
  491. ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
  492. externalName: ""
  493. ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
  494. loadBalancerIP: ""
  495. ## annotations may need to be set when service.type is LoadBalancer
  496. ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
  497. annotations: {}
  498. env:
  499. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  500. ingress:
  501. enabled: false
  502. host: "dolphinscheduler.org"
  503. path: "/dolphinscheduler"
  504. annotations: {}
  505. tls:
  506. enabled: false
  507. secretName: "dolphinscheduler-tls"