#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

timezone: "Asia/Shanghai"
image:
  registry: "dolphinscheduler.docker.scarf.sh/apache"
  tag: "dev-SNAPSHOT"
  pullPolicy: "IfNotPresent"
  pullSecret: ""
## If not exists external database, by default, Dolphinscheduler's database will use it.
postgresql:
  enabled: true
  postgresqlUsername: "root"
  postgresqlPassword: "root"
  postgresqlDatabase: "dolphinscheduler"
  persistence:
    enabled: false
    size: "20Gi"
    storageClass: "-"
## If exists external database, and set postgresql.enable value to false.
## external database will be used, otherwise Dolphinscheduler's database will be used.
externalDatabase:
  type: "postgresql"
  host: "localhost"
  port: "5432"
  username: "root"
  password: "root"
  database: "dolphinscheduler"
  params: "characterEncoding=utf8"
## If not exists external registry, the zookeeper registry will be used by default.
zookeeper:
  enabled: true
  service:
    port: 2181
  fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  persistence:
    enabled: false
    size: "20Gi"
    storageClass: "-"
## If exists external registry and set zookeeper.enable value to false, the external registry will be used.
externalRegistry:
  registryPluginDir: "lib/plugin/registry"
  registryPluginName: "zookeeper"
  registryServers: "127.0.0.1:2181"
conf:
  common:
    # user data local directory path, please make sure the directory exists and have read write permissions
    data.basedir.path: /tmp/dolphinscheduler
    # resource storage type: HDFS, S3, NONE
    resource.storage.type: HDFS
    # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
    resource.upload.path: /dolphinscheduler
    # whether to startup kerberos
    hadoop.security.authentication.startup.state: false
    # java.security.krb5.conf path
    java.security.krb5.conf.path: /opt/krb5.conf
    # login user from keytab username
    login.user.keytab.username: hdfs-mycluster@ESZ.COM
    # login user from keytab path
    login.user.keytab.path: /opt/hdfs.headless.keytab
    # kerberos expire time, the unit is hour
    kerberos.expire.time: 2
    # resource view suffixs
    #resource.view.suffixs: txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
    # if resource.storage.type: HDFS, the user must have the permission to create directories under the HDFS root path
    hdfs.root.user: hdfs
    # if resource.storage.type: S3, the value like: s3a://dolphinscheduler; if resource.storage.type: HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
    fs.defaultFS: file:///
    aws.access.key.id: minioadmin
    aws.secret.access.key: minioadmin
    aws.region: us-east-1
    aws.endpoint: http://localhost:9000
    # resourcemanager port, the default value is 8088 if not specified
    resource.manager.httpaddress.port: 8088
    # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
    yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
    # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
    yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
    # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
    yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
    # datasource encryption enable
    datasource.encryption.enable: false
    # datasource encryption salt
    datasource.encryption.salt: '!@#$%^&*'
    # data quality option
    data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
    #data-quality.error.output.path: /tmp/data-quality-error-data
    # Network IP gets priority, default inner outer
    # Whether hive SQL is executed in the same session
    support.hive.oneSession: false
    # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
    sudo.enable: true
    # network interface preferred like eth0, default: empty
    #dolphin.scheduler.network.interface.preferred:
    # network IP gets priority, default: inner outer
    #dolphin.scheduler.network.priority.strategy: default
    # system env path
    #dolphinscheduler.env.path: dolphinscheduler_env.sh
    # development state
    development.state: false
    # rpc port
    alert.rpc.port: 50052
    # Url endpoint for zeppelin RESTful API
    zeppelin.rest.url: http://localhost:8080
common:
  ## Configmap
  configmap:
    DOLPHINSCHEDULER_OPTS: ""
    DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
    RESOURCE_STORAGE_TYPE: "HDFS"
    RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
    FS_DEFAULT_FS: "file:///"
    FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
    FS_S3A_ACCESS_KEY: "xxxxxxx"
    FS_S3A_SECRET_KEY: "xxxxxxx"
    HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE: "false"
    JAVA_SECURITY_KRB5_CONF_PATH: "/opt/krb5.conf"
    LOGIN_USER_KEYTAB_USERNAME: "hdfs@HADOOP.COM"
    LOGIN_USER_KEYTAB_PATH: "/opt/hdfs.keytab"
    KERBEROS_EXPIRE_TIME: "2"
    HDFS_ROOT_USER: "hdfs"
    RESOURCE_MANAGER_HTTPADDRESS_PORT: "8088"
    YARN_RESOURCEMANAGER_HA_RM_IDS: ""
    YARN_APPLICATION_STATUS_ADDRESS: "http://ds1:%s/ws/v1/cluster/apps/%s"
    YARN_JOB_HISTORY_STATUS_ADDRESS: "http://ds1:19888/ws/v1/history/mapreduce/jobs/%s"
    DATASOURCE_ENCRYPTION_ENABLE: "false"
    DATASOURCE_ENCRYPTION_SALT: "!@#$%^&*"
    SUDO_ENABLE: "true"
    # dolphinscheduler env
    HADOOP_HOME: "/opt/soft/hadoop"
    HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
    SPARK_HOME1: "/opt/soft/spark1"
    SPARK_HOME2: "/opt/soft/spark2"
    PYTHON_HOME: "/usr/bin/python"
    JAVA_HOME: "/usr/local/openjdk-8"
    HIVE_HOME: "/opt/soft/hive"
    FLINK_HOME: "/opt/soft/flink"
    DATAX_HOME: "/opt/soft/datax"
  ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  sharedStoragePersistence:
    enabled: false
    mountPath: "/opt/soft"
    accessModes:
      - "ReadWriteMany"
    ## storageClassName must support the access mode: ReadWriteMany
    storageClassName: "-"
    storage: "20Gi"
  ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  fsFileResourcePersistence:
    enabled: false
    accessModes:
      - "ReadWriteMany"
    ## storageClassName must support the access mode: ReadWriteMany
    storageClassName: "-"
    storage: "20Gi"
master:
  ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  podManagementPolicy: "Parallel"
  ## Replicas is the desired number of replicas of the given Template.
  replicas: "3"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "8Gi"
  #     cpu: "4"
  #   requests:
  #     memory: "2Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  env:
    JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
    MASTER_EXEC_THREADS: "100"
    MASTER_EXEC_TASK_NUM: "20"
    MASTER_DISPATCH_TASK_NUM: "3"
    MASTER_HOST_SELECTOR: "LowerWeight"
    MASTER_HEARTBEAT_INTERVAL: "10s"
    MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
    MASTER_TASK_COMMIT_RETRYTIMES: "5"
    MASTER_TASK_COMMIT_INTERVAL: "1s"
    MASTER_STATE_WHEEL_INTERVAL: "5s"
    MASTER_MAX_CPU_LOAD_AVG: "-1"
    MASTER_RESERVED_MEMORY: "0.3"
    MASTER_FAILOVER_INTERVAL: "10m"
    MASTER_KILL_YARN_JOB_WHEN_HANDLE_FAILOVER: "true"
worker:
  ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  podManagementPolicy: "Parallel"
  ## Replicas is the desired number of replicas of the given Template.
  replicas: "3"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "8Gi"
  #     cpu: "4"
  #   requests:
  #     memory: "2Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    enabled: false
    ## dolphinscheduler data volume
    dataPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
    ## dolphinscheduler logs volume
    logsPersistentVolume:
      enabled: false
      accessModes:
        - "ReadWriteOnce"
      storageClassName: "-"
      storage: "20Gi"
  env:
    WORKER_GROUPS_0: default
    WORKER_MAX_CPU_LOAD_AVG: "-1"
    WORKER_RESERVED_MEMORY: "0.3"
    WORKER_EXEC_THREADS: "100"
    WORKER_HEARTBEAT_INTERVAL: "10s"
    WORKER_HEART_ERROR_THRESHOLD: "5"
    WORKER_HOST_WEIGHT: "100"
    WORKER_GROUPS: "default"
alert:
  ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  replicas: 1
  ## The deployment strategy to use to replace existing pods with new ones.
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  affinity: {}
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  resources: {}
  # resources:
  #   limits:
  #     memory: "2Gi"
  #     cpu: "1"
  #   requests:
  #     memory: "1Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  env:
    JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
api:
  ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  replicas: "1"
  ## The deployment strategy to use to replace existing pods with new ones.
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  affinity: {}
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  resources: {}
  # resources:
  #   limits:
  #     memory: "2Gi"
  #     cpu: "1"
  #   requests:
  #     memory: "1Gi"
  #     cpu: "500m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  persistentVolumeClaim:
    enabled: false
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "-"
    storage: "20Gi"
  service:
    ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
    type: "ClusterIP"
    ## clusterIP is the IP address of the service and is usually assigned randomly by the master
    clusterIP: ""
    ## nodePort is the port on each node on which this service is exposed when type=NodePort
    nodePort: ""
    ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
    externalIPs: []
    ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
    externalName: ""
    ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
    loadBalancerIP: ""
    ## annotations may need to be set when service.type is LoadBalancer
    ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
    annotations: {}
  env:
    JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
ingress:
  enabled: false
  host: "dolphinscheduler.org"
  path: "/dolphinscheduler"
  annotations: {}
  tls:
    enabled: false
    secretName: "dolphinscheduler-tls"