#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
  17. # Default values for dolphinscheduler-chart.
  18. # This is a YAML-formatted file.
  19. # Declare variables to be passed into your templates.
  20. timezone: "Asia/Shanghai"
  21. # Used to detect whether dolphinscheduler dependent services such as database are ready
  22. initImage:
  23. pullPolicy: "IfNotPresent"
  24. busybox: "busybox:1.30.1"
  25. image:
  26. registry: "dolphinscheduler.docker.scarf.sh/apache"
  27. tag: "dev-SNAPSHOT"
  28. pullPolicy: "IfNotPresent"
  29. pullSecret: ""
  30. master: dolphinscheduler-master
  31. worker: dolphinscheduler-worker
  32. api: dolphinscheduler-api
  33. alert: dolphinscheduler-alert-server
  34. tools: dolphinscheduler-tools
  35. ## If not exists external database, by default, Dolphinscheduler's database will use it.
  36. postgresql:
  37. enabled: true
  38. postgresqlUsername: "root"
  39. postgresqlPassword: "root"
  40. postgresqlDatabase: "dolphinscheduler"
  41. params: "characterEncoding=utf8"
  42. persistence:
  43. enabled: false
  44. size: "20Gi"
  45. storageClass: "-"
  46. mysql:
  47. enabled: false
  48. auth:
  49. username: "ds"
  50. password: "ds"
  51. database: "dolphinscheduler"
  52. params: "characterEncoding=utf8"
  53. primary:
  54. persistence:
  55. enabled: false
  56. size: "20Gi"
  57. storageClass: "-"
  58. minio:
  59. enabled: true
  60. auth:
  61. rootUser: minioadmin
  62. rootPassword: minioadmin
  63. persistence:
  64. enabled: false
  65. defaultBuckets: "dolphinscheduler"
  66. ## If exists external database, and set postgresql.enable value to false.
  67. ## external database will be used, otherwise Dolphinscheduler's database will be used.
  68. externalDatabase:
  69. enabled: false
  70. type: "postgresql"
  71. host: "localhost"
  72. port: "5432"
  73. username: "root"
  74. password: "root"
  75. database: "dolphinscheduler"
  76. params: "characterEncoding=utf8"
  77. ## If not exists external registry, the zookeeper registry will be used by default.
  78. zookeeper:
  79. enabled: true
  80. service:
  81. port: 2181
  82. fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  83. persistence:
  84. enabled: false
  85. size: "20Gi"
  86. storageClass: "-"
  87. ## If exists external registry and set zookeeper.enable value to false, the external registry will be used.
  88. externalRegistry:
  89. registryPluginName: "zookeeper"
  90. registryServers: "127.0.0.1:2181"
  91. conf:
  92. common:
  93. # user data local directory path, please make sure the directory exists and have read write permissions
  94. data.basedir.path: /tmp/dolphinscheduler
  95. # resource storage type: HDFS, S3, OSS, GCS, NONE
  96. resource.storage.type: S3
  97. # resource store on HDFS/S3 path, resource file will store to this base path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
  98. resource.storage.upload.base.path: /dolphinscheduler
  99. # The AWS access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  100. resource.aws.access.key.id: minioadmin
  101. # The AWS secret access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  102. resource.aws.secret.access.key: minioadmin
  103. # The AWS Region to use. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  104. resource.aws.region: ca-central-1
  105. # The name of the bucket. You need to create them by yourself. Otherwise, the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
  106. resource.aws.s3.bucket.name: dolphinscheduler
  107. # You need to set this parameter when private cloud s3. If S3 uses public cloud, you only need to set resource.aws.region or set to the endpoint of a public cloud such as S3.cn-north-1.amazonaws.com.cn
  108. resource.aws.s3.endpoint: http://minio:9000
  109. # alibaba cloud access key id, required if you set resource.storage.type=OSS
  110. resource.alibaba.cloud.access.key.id: <your-access-key-id>
  111. # alibaba cloud access key secret, required if you set resource.storage.type=OSS
  112. resource.alibaba.cloud.access.key.secret: <your-access-key-secret>
  113. # alibaba cloud region, required if you set resource.storage.type=OSS
  114. resource.alibaba.cloud.region: cn-hangzhou
  115. # oss bucket name, required if you set resource.storage.type=OSS
  116. resource.alibaba.cloud.oss.bucket.name: dolphinscheduler
  117. # oss bucket endpoint, required if you set resource.storage.type=OSS
  118. resource.alibaba.cloud.oss.endpoint: https://oss-cn-hangzhou.aliyuncs.com
  119. # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
  120. resource.hdfs.root.user: hdfs
  121. # if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
  122. resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
  123. # whether to startup kerberos
  124. hadoop.security.authentication.startup.state: false
  125. # java.security.krb5.conf path
  126. java.security.krb5.conf.path: /opt/krb5.conf
  127. # login user from keytab username
  128. login.user.keytab.username: hdfs-mycluster@ESZ.COM
  129. # login user from keytab path
  130. login.user.keytab.path: /opt/hdfs.headless.keytab
  131. # kerberos expire time, the unit is hour
  132. kerberos.expire.time: 2
  133. # resourcemanager port, the default value is 8088 if not specified
  134. resource.manager.httpaddress.port: 8088
  135. # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
  136. yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
  137. # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
  138. yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
  139. # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
  140. yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
  141. # datasource encryption enable
  142. datasource.encryption.enable: false
  143. # datasource encryption salt
  144. datasource.encryption.salt: '!@#$%^&*'
  145. # data quality option
  146. data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
  147. # Whether hive SQL is executed in the same session
  148. support.hive.oneSession: false
  149. # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
  150. sudo.enable: true
  151. # development state
  152. development.state: false
  153. # rpc port
  154. alert.rpc.port: 50052
  155. # set path of conda.sh
  156. conda.path: /opt/anaconda3/etc/profile.d/conda.sh
  157. # Task resource limit state
  158. task.resource.limit.state: false
  159. # mlflow task plugin preset repository
  160. ml.mlflow.preset_repository: https://github.com/apache/dolphinscheduler-mlflow
  161. # mlflow task plugin preset repository version
  162. ml.mlflow.preset_repository_version: "main"
  163. # way to collect applicationId: log, aop
  164. appId.collect: log
  165. common:
  166. ## Configmap
  167. configmap:
  168. DOLPHINSCHEDULER_OPTS: ""
  169. DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
  170. RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
  171. # dolphinscheduler env
  172. HADOOP_HOME: "/opt/soft/hadoop"
  173. HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
  174. SPARK_HOME: "/opt/soft/spark"
  175. PYTHON_HOME: "/usr/bin/python"
  176. JAVA_HOME: "/opt/java/openjdk"
  177. HIVE_HOME: "/opt/soft/hive"
  178. FLINK_HOME: "/opt/soft/flink"
  179. DATAX_HOME: "/opt/soft/datax"
  180. ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  181. sharedStoragePersistence:
  182. enabled: false
  183. mountPath: "/opt/soft"
  184. accessModes:
  185. - "ReadWriteMany"
  186. ## storageClassName must support the access mode: ReadWriteMany
  187. storageClassName: "-"
  188. storage: "20Gi"
  189. ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  190. fsFileResourcePersistence:
  191. enabled: false
  192. accessModes:
  193. - "ReadWriteMany"
  194. ## storageClassName must support the access mode: ReadWriteMany
  195. storageClassName: "-"
  196. storage: "20Gi"
  197. master:
  198. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  199. podManagementPolicy: "Parallel"
  200. ## Replicas is the desired number of replicas of the given Template.
  201. replicas: "3"
  202. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  203. ## Clients such as tools and libraries can retrieve this metadata.
  204. annotations: {}
  205. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  206. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  207. affinity: {}
  208. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  209. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  210. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  211. nodeSelector: {}
  212. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  213. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  214. tolerations: []
  215. ## Compute Resources required by this container. Cannot be updated.
  216. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  217. resources: {}
  218. # resources:
  219. # limits:
  220. # memory: "8Gi"
  221. # cpu: "4"
  222. # requests:
  223. # memory: "2Gi"
  224. # cpu: "500m"
  225. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  226. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  227. livenessProbe:
  228. enabled: true
  229. initialDelaySeconds: "30"
  230. periodSeconds: "30"
  231. timeoutSeconds: "5"
  232. failureThreshold: "3"
  233. successThreshold: "1"
  234. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  235. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  236. readinessProbe:
  237. enabled: true
  238. initialDelaySeconds: "30"
  239. periodSeconds: "30"
  240. timeoutSeconds: "5"
  241. failureThreshold: "3"
  242. successThreshold: "1"
  243. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  244. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  245. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  246. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  247. persistentVolumeClaim:
  248. enabled: false
  249. accessModes:
  250. - "ReadWriteOnce"
  251. storageClassName: "-"
  252. storage: "20Gi"
  253. env:
  254. JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
  255. MASTER_EXEC_THREADS: "100"
  256. MASTER_EXEC_TASK_NUM: "20"
  257. MASTER_DISPATCH_TASK_NUM: "3"
  258. MASTER_HOST_SELECTOR: "LowerWeight"
  259. MASTER_HEARTBEAT_INTERVAL: "10s"
  260. MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
  261. MASTER_TASK_COMMIT_RETRYTIMES: "5"
  262. MASTER_TASK_COMMIT_INTERVAL: "1s"
  263. MASTER_STATE_WHEEL_INTERVAL: "5s"
  264. MASTER_MAX_CPU_LOAD_AVG: "1"
  265. MASTER_RESERVED_MEMORY: "0.3"
  266. MASTER_FAILOVER_INTERVAL: "10m"
  267. MASTER_KILL_APPLICATION_WHEN_HANDLE_FAILOVER: "true"
  268. worker:
  269. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  270. podManagementPolicy: "Parallel"
  271. ## Replicas is the desired number of replicas of the given Template.
  272. replicas: "3"
  273. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  274. ## Clients such as tools and libraries can retrieve this metadata.
  275. annotations: {}
  276. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  277. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  278. affinity: {}
  279. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  280. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  281. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  282. nodeSelector: {}
  283. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  284. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  285. tolerations: []
  286. ## Compute Resources required by this container. Cannot be updated.
  287. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  288. resources: {}
  289. # resources:
  290. # limits:
  291. # memory: "8Gi"
  292. # cpu: "4"
  293. # requests:
  294. # memory: "2Gi"
  295. # cpu: "500m"
  296. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  297. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  298. livenessProbe:
  299. enabled: true
  300. initialDelaySeconds: "30"
  301. periodSeconds: "30"
  302. timeoutSeconds: "5"
  303. failureThreshold: "3"
  304. successThreshold: "1"
  305. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  306. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  307. readinessProbe:
  308. enabled: true
  309. initialDelaySeconds: "30"
  310. periodSeconds: "30"
  311. timeoutSeconds: "5"
  312. failureThreshold: "3"
  313. successThreshold: "1"
  314. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  315. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  316. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  317. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  318. persistentVolumeClaim:
  319. enabled: false
  320. ## dolphinscheduler data volume
  321. dataPersistentVolume:
  322. enabled: false
  323. accessModes:
  324. - "ReadWriteOnce"
  325. storageClassName: "-"
  326. storage: "20Gi"
  327. ## dolphinscheduler logs volume
  328. logsPersistentVolume:
  329. enabled: false
  330. accessModes:
  331. - "ReadWriteOnce"
  332. storageClassName: "-"
  333. storage: "20Gi"
  334. env:
  335. WORKER_MAX_CPU_LOAD_AVG: "1"
  336. WORKER_RESERVED_MEMORY: "0.3"
  337. WORKER_EXEC_THREADS: "100"
  338. WORKER_HEARTBEAT_INTERVAL: "10s"
  339. WORKER_HEART_ERROR_THRESHOLD: "5"
  340. WORKER_HOST_WEIGHT: "100"
  341. keda:
  342. enabled: false
  343. namespaceLabels: { }
  344. # How often KEDA polls the DolphinScheduler DB to report new scale requests to the HPA
  345. pollingInterval: 5
  346. # How many seconds KEDA will wait before scaling to zero.
  347. # Note that HPA has a separate cooldown period for scale-downs
  348. cooldownPeriod: 30
  349. # Minimum number of workers created by keda
  350. minReplicaCount: 0
  351. # Maximum number of workers created by keda
  352. maxReplicaCount: 3
  353. # Specify HPA related options
  354. advanced: { }
  355. # horizontalPodAutoscalerConfig:
  356. # behavior:
  357. # scaleDown:
  358. # stabilizationWindowSeconds: 300
  359. # policies:
  360. # - type: Percent
  361. # value: 100
  362. # periodSeconds: 15
  363. alert:
  364. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  365. replicas: 1
  366. ## The deployment strategy to use to replace existing pods with new ones.
  367. strategy:
  368. type: "RollingUpdate"
  369. rollingUpdate:
  370. maxSurge: "25%"
  371. maxUnavailable: "25%"
  372. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  373. ## Clients such as tools and libraries can retrieve this metadata.
  374. annotations: {}
  375. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  376. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  377. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  378. affinity: {}
  379. ## Compute Resources required by this container. Cannot be updated.
  380. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  381. nodeSelector: {}
  382. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  383. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  384. tolerations: []
  385. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  386. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  387. resources: {}
  388. # resources:
  389. # limits:
  390. # memory: "2Gi"
  391. # cpu: "1"
  392. # requests:
  393. # memory: "1Gi"
  394. # cpu: "500m"
  395. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  396. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  397. livenessProbe:
  398. enabled: true
  399. initialDelaySeconds: "30"
  400. periodSeconds: "30"
  401. timeoutSeconds: "5"
  402. failureThreshold: "3"
  403. successThreshold: "1"
  404. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  405. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  406. readinessProbe:
  407. enabled: true
  408. initialDelaySeconds: "30"
  409. periodSeconds: "30"
  410. timeoutSeconds: "5"
  411. failureThreshold: "3"
  412. successThreshold: "1"
  413. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  414. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  415. persistentVolumeClaim:
  416. enabled: false
  417. accessModes:
  418. - "ReadWriteOnce"
  419. storageClassName: "-"
  420. storage: "20Gi"
  421. env:
  422. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  423. api:
  424. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  425. replicas: "1"
  426. ## The deployment strategy to use to replace existing pods with new ones.
  427. strategy:
  428. type: "RollingUpdate"
  429. rollingUpdate:
  430. maxSurge: "25%"
  431. maxUnavailable: "25%"
  432. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  433. ## Clients such as tools and libraries can retrieve this metadata.
  434. annotations: {}
  435. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  436. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  437. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  438. affinity: {}
  439. ## Compute Resources required by this container. Cannot be updated.
  440. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  441. nodeSelector: {}
  442. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  443. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  444. tolerations: []
  445. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  446. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  447. resources: {}
  448. # resources:
  449. # limits:
  450. # memory: "2Gi"
  451. # cpu: "1"
  452. # requests:
  453. # memory: "1Gi"
  454. # cpu: "500m"
  455. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  456. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  457. livenessProbe:
  458. enabled: true
  459. initialDelaySeconds: "30"
  460. periodSeconds: "30"
  461. timeoutSeconds: "5"
  462. failureThreshold: "3"
  463. successThreshold: "1"
  464. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  465. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  466. readinessProbe:
  467. enabled: true
  468. initialDelaySeconds: "30"
  469. periodSeconds: "30"
  470. timeoutSeconds: "5"
  471. failureThreshold: "3"
  472. successThreshold: "1"
  473. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  474. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  475. persistentVolumeClaim:
  476. enabled: false
  477. accessModes:
  478. - "ReadWriteOnce"
  479. storageClassName: "-"
  480. storage: "20Gi"
  481. service:
  482. ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
  483. type: "ClusterIP"
  484. ## clusterIP is the IP address of the service and is usually assigned randomly by the master
  485. clusterIP: ""
  486. ## nodePort is the port on each node on which this api service is exposed when type=NodePort
  487. nodePort: ""
  488. ## pythonNodePort is the port on each node on which this python api service is exposed when type=NodePort
  489. pythonNodePort: ""
  490. ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
  491. externalIPs: []
  492. ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
  493. externalName: ""
  494. ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
  495. loadBalancerIP: ""
  496. ## annotations may need to be set when service.type is LoadBalancer
  497. ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
  498. annotations: {}
  499. env:
  500. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  501. ingress:
  502. enabled: false
  503. host: "dolphinscheduler.org"
  504. path: "/dolphinscheduler"
  505. annotations: {}
  506. tls:
  507. enabled: false
  508. secretName: "dolphinscheduler-tls"