---
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
  20. timezone: "Asia/Shanghai"
  21. image:
  22. registry: "dolphinscheduler.docker.scarf.sh/apache"
  23. tag: "dev-SNAPSHOT"
  24. pullPolicy: "IfNotPresent"
  25. pullSecret: ""
  26. ## If not exists external database, by default, Dolphinscheduler's database will use it.
  27. postgresql:
  28. enabled: true
  29. postgresqlUsername: "root"
  30. postgresqlPassword: "root"
  31. postgresqlDatabase: "dolphinscheduler"
  32. persistence:
  33. enabled: false
  34. size: "20Gi"
  35. storageClass: "-"
  36. ## If exists external database, and set postgresql.enable value to false.
  37. ## external database will be used, otherwise Dolphinscheduler's database will be used.
  38. externalDatabase:
  39. type: "postgresql"
  40. host: "localhost"
  41. port: "5432"
  42. username: "root"
  43. password: "root"
  44. database: "dolphinscheduler"
  45. params: "characterEncoding=utf8"
  46. ## If not exists external registry, the zookeeper registry will be used by default.
  47. zookeeper:
  48. enabled: true
  49. service:
  50. port: 2181
  51. fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  52. persistence:
  53. enabled: false
  54. size: "20Gi"
  55. storageClass: "-"
  56. ## If exists external registry and set zookeeper.enable value to false, the external registry will be used.
  57. externalRegistry:
  58. registryPluginDir: "lib/plugin/registry"
  59. registryPluginName: "zookeeper"
  60. registryServers: "127.0.0.1:2181"
  61. conf:
  62. common:
  63. # user data local directory path, please make sure the directory exists and have read write permissions
  64. data.basedir.path: /tmp/dolphinscheduler
  65. # resource storage type: HDFS, S3, NONE
  66. resource.storage.type: HDFS
  67. # resource store on HDFS/S3 path, resource file will store to this base path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
  68. resource.storage.upload.base.path: /dolphinscheduler
  69. # whether to startup kerberos
  70. hadoop.security.authentication.startup.state: false
  71. # java.security.krb5.conf path
  72. java.security.krb5.conf.path: /opt/krb5.conf
  73. # login user from keytab username
  74. login.user.keytab.username: hdfs-mycluster@ESZ.COM
  75. # login user from keytab path
  76. login.user.keytab.path: /opt/hdfs.headless.keytab
  77. # kerberos expire time, the unit is hour
  78. kerberos.expire.time: 2
  79. # resource view suffixs
  80. #resource.view.suffixs: txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
  81. # if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
  82. resource.hdfs.root.user: hdfs
  83. # if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
  84. resource.hdfs.fs.defaultFS: hdfs://mycluster:8020
  85. # The AWS access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  86. resource.aws.access.key.id: minioadmin
  87. # The AWS secret access key. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  88. resource.aws.secret.access.key: minioadmin
  89. # The AWS Region to use. if resource.storage.type=S3 or use EMR-Task, This configuration is required
  90. resource.aws.region: cn-north-1
  91. # The name of the bucket. You need to create them by yourself. Otherwise, the system cannot start. All buckets in Amazon S3 share a single namespace; ensure the bucket is given a unique name.
  92. resource.aws.s3.bucket.name: dolphinscheduler
  93. # You need to set this parameter when private cloud s3. If S3 uses public cloud, you only need to set resource.aws.region or set to the endpoint of a public cloud such as S3.cn-north-1.amazonaws.com.cn
  94. resource.aws.s3.endpoint: http://localhost:9000
  95. # resourcemanager port, the default value is 8088 if not specified
  96. resource.manager.httpaddress.port: 8088
  97. # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
  98. yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
  99. # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
  100. yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
  101. # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
  102. yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
  103. # datasource encryption enable
  104. datasource.encryption.enable: false
  105. # datasource encryption salt
  106. datasource.encryption.salt: '!@#$%^&*'
  107. # data quality option
  108. data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
  109. #data-quality.error.output.path: /tmp/data-quality-error-data
  110. # Network IP gets priority, default inner outer
  111. # Whether hive SQL is executed in the same session
  112. support.hive.oneSession: false
  113. # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
  114. sudo.enable: true
  115. # network interface preferred like eth0, default: empty
  116. #dolphin.scheduler.network.interface.preferred:
  117. # network IP gets priority, default: inner outer
  118. #dolphin.scheduler.network.priority.strategy: default
  119. # system env path
  120. #dolphinscheduler.env.path: dolphinscheduler_env.sh
  121. # development state
  122. development.state: false
  123. # rpc port
  124. alert.rpc.port: 50052
  125. # Url endpoint for zeppelin RESTful API
  126. zeppelin.rest.url: http://localhost:8080
  127. common:
  128. ## Configmap
  129. configmap:
  130. DOLPHINSCHEDULER_OPTS: ""
  131. DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
  132. RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
  133. # dolphinscheduler env
  134. HADOOP_HOME: "/opt/soft/hadoop"
  135. HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
  136. SPARK_HOME1: "/opt/soft/spark1"
  137. SPARK_HOME2: "/opt/soft/spark2"
  138. PYTHON_HOME: "/usr/bin/python"
  139. JAVA_HOME: "/usr/local/openjdk-8"
  140. HIVE_HOME: "/opt/soft/hive"
  141. FLINK_HOME: "/opt/soft/flink"
  142. DATAX_HOME: "/opt/soft/datax"
  143. ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  144. sharedStoragePersistence:
  145. enabled: false
  146. mountPath: "/opt/soft"
  147. accessModes:
  148. - "ReadWriteMany"
  149. ## storageClassName must support the access mode: ReadWriteMany
  150. storageClassName: "-"
  151. storage: "20Gi"
  152. ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  153. fsFileResourcePersistence:
  154. enabled: false
  155. accessModes:
  156. - "ReadWriteMany"
  157. ## storageClassName must support the access mode: ReadWriteMany
  158. storageClassName: "-"
  159. storage: "20Gi"
  160. master:
  161. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  162. podManagementPolicy: "Parallel"
  163. ## Replicas is the desired number of replicas of the given Template.
  164. replicas: "3"
  165. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  166. ## Clients such as tools and libraries can retrieve this metadata.
  167. annotations: {}
  168. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  169. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  170. affinity: {}
  171. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  172. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  173. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  174. nodeSelector: {}
  175. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  176. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  177. tolerations: []
  178. ## Compute Resources required by this container. Cannot be updated.
  179. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  180. resources: {}
  181. # resources:
  182. # limits:
  183. # memory: "8Gi"
  184. # cpu: "4"
  185. # requests:
  186. # memory: "2Gi"
  187. # cpu: "500m"
  188. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  189. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  190. livenessProbe:
  191. enabled: true
  192. initialDelaySeconds: "30"
  193. periodSeconds: "30"
  194. timeoutSeconds: "5"
  195. failureThreshold: "3"
  196. successThreshold: "1"
  197. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  198. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  199. readinessProbe:
  200. enabled: true
  201. initialDelaySeconds: "30"
  202. periodSeconds: "30"
  203. timeoutSeconds: "5"
  204. failureThreshold: "3"
  205. successThreshold: "1"
  206. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  207. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  208. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  209. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  210. persistentVolumeClaim:
  211. enabled: false
  212. accessModes:
  213. - "ReadWriteOnce"
  214. storageClassName: "-"
  215. storage: "20Gi"
  216. env:
  217. JAVA_OPTS: "-Xms1g -Xmx1g -Xmn512m"
  218. MASTER_EXEC_THREADS: "100"
  219. MASTER_EXEC_TASK_NUM: "20"
  220. MASTER_DISPATCH_TASK_NUM: "3"
  221. MASTER_HOST_SELECTOR: "LowerWeight"
  222. MASTER_HEARTBEAT_INTERVAL: "10s"
  223. MASTER_HEARTBEAT_ERROR_THRESHOLD: "5"
  224. MASTER_TASK_COMMIT_RETRYTIMES: "5"
  225. MASTER_TASK_COMMIT_INTERVAL: "1s"
  226. MASTER_STATE_WHEEL_INTERVAL: "5s"
  227. MASTER_MAX_CPU_LOAD_AVG: "-1"
  228. MASTER_RESERVED_MEMORY: "0.3"
  229. MASTER_FAILOVER_INTERVAL: "10m"
  230. MASTER_KILL_YARN_JOB_WHEN_HANDLE_FAILOVER: "true"
  231. worker:
  232. ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  233. podManagementPolicy: "Parallel"
  234. ## Replicas is the desired number of replicas of the given Template.
  235. replicas: "3"
  236. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  237. ## Clients such as tools and libraries can retrieve this metadata.
  238. annotations: {}
  239. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  240. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  241. affinity: {}
  242. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  243. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  244. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  245. nodeSelector: {}
  246. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  247. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  248. tolerations: []
  249. ## Compute Resources required by this container. Cannot be updated.
  250. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  251. resources: {}
  252. # resources:
  253. # limits:
  254. # memory: "8Gi"
  255. # cpu: "4"
  256. # requests:
  257. # memory: "2Gi"
  258. # cpu: "500m"
  259. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  260. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  261. livenessProbe:
  262. enabled: true
  263. initialDelaySeconds: "30"
  264. periodSeconds: "30"
  265. timeoutSeconds: "5"
  266. failureThreshold: "3"
  267. successThreshold: "1"
  268. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  269. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  270. readinessProbe:
  271. enabled: true
  272. initialDelaySeconds: "30"
  273. periodSeconds: "30"
  274. timeoutSeconds: "5"
  275. failureThreshold: "3"
  276. successThreshold: "1"
  277. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  278. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  279. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  280. ## A claim in this list takes precedence over any volumes in the template, with the same name.
  281. persistentVolumeClaim:
  282. enabled: false
  283. ## dolphinscheduler data volume
  284. dataPersistentVolume:
  285. enabled: false
  286. accessModes:
  287. - "ReadWriteOnce"
  288. storageClassName: "-"
  289. storage: "20Gi"
  290. ## dolphinscheduler logs volume
  291. logsPersistentVolume:
  292. enabled: false
  293. accessModes:
  294. - "ReadWriteOnce"
  295. storageClassName: "-"
  296. storage: "20Gi"
  297. env:
  298. WORKER_GROUPS_0: default
  299. WORKER_MAX_CPU_LOAD_AVG: "-1"
  300. WORKER_RESERVED_MEMORY: "0.3"
  301. WORKER_EXEC_THREADS: "100"
  302. WORKER_HEARTBEAT_INTERVAL: "10s"
  303. WORKER_HEART_ERROR_THRESHOLD: "5"
  304. WORKER_HOST_WEIGHT: "100"
  305. WORKER_GROUPS: "default"
  306. alert:
  307. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  308. replicas: 1
  309. ## The deployment strategy to use to replace existing pods with new ones.
  310. strategy:
  311. type: "RollingUpdate"
  312. rollingUpdate:
  313. maxSurge: "25%"
  314. maxUnavailable: "25%"
  315. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  316. ## Clients such as tools and libraries can retrieve this metadata.
  317. annotations: {}
  318. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  319. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  320. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  321. affinity: {}
  322. ## Compute Resources required by this container. Cannot be updated.
  323. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  324. nodeSelector: {}
  325. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  326. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  327. tolerations: []
  328. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  329. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  330. resources: {}
  331. # resources:
  332. # limits:
  333. # memory: "2Gi"
  334. # cpu: "1"
  335. # requests:
  336. # memory: "1Gi"
  337. # cpu: "500m"
  338. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  339. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  340. livenessProbe:
  341. enabled: true
  342. initialDelaySeconds: "30"
  343. periodSeconds: "30"
  344. timeoutSeconds: "5"
  345. failureThreshold: "3"
  346. successThreshold: "1"
  347. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  348. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  349. readinessProbe:
  350. enabled: true
  351. initialDelaySeconds: "30"
  352. periodSeconds: "30"
  353. timeoutSeconds: "5"
  354. failureThreshold: "3"
  355. successThreshold: "1"
  356. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  357. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  358. persistentVolumeClaim:
  359. enabled: false
  360. accessModes:
  361. - "ReadWriteOnce"
  362. storageClassName: "-"
  363. storage: "20Gi"
  364. env:
  365. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  366. api:
  367. ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  368. replicas: "1"
  369. ## The deployment strategy to use to replace existing pods with new ones.
  370. strategy:
  371. type: "RollingUpdate"
  372. rollingUpdate:
  373. maxSurge: "25%"
  374. maxUnavailable: "25%"
  375. ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  376. ## Clients such as tools and libraries can retrieve this metadata.
  377. annotations: {}
  378. ## NodeSelector is a selector which must be true for the pod to fit on a node.
  379. ## Selector which must match a node's labels for the pod to be scheduled on that node.
  380. ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  381. affinity: {}
  382. ## Compute Resources required by this container. Cannot be updated.
  383. ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  384. nodeSelector: {}
  385. ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  386. ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  387. tolerations: []
  388. ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  389. ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  390. resources: {}
  391. # resources:
  392. # limits:
  393. # memory: "2Gi"
  394. # cpu: "1"
  395. # requests:
  396. # memory: "1Gi"
  397. # cpu: "500m"
  398. ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  399. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  400. livenessProbe:
  401. enabled: true
  402. initialDelaySeconds: "30"
  403. periodSeconds: "30"
  404. timeoutSeconds: "5"
  405. failureThreshold: "3"
  406. successThreshold: "1"
  407. ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  408. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  409. readinessProbe:
  410. enabled: true
  411. initialDelaySeconds: "30"
  412. periodSeconds: "30"
  413. timeoutSeconds: "5"
  414. failureThreshold: "3"
  415. successThreshold: "1"
  416. ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  417. ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  418. persistentVolumeClaim:
  419. enabled: false
  420. accessModes:
  421. - "ReadWriteOnce"
  422. storageClassName: "-"
  423. storage: "20Gi"
  424. service:
  425. ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
  426. type: "ClusterIP"
  427. ## clusterIP is the IP address of the service and is usually assigned randomly by the master
  428. clusterIP: ""
  429. ## nodePort is the port on each node on which this service is exposed when type=NodePort
  430. nodePort: ""
  431. ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
  432. externalIPs: []
  433. ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
  434. externalName: ""
  435. ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
  436. loadBalancerIP: ""
  437. ## annotations may need to be set when service.type is LoadBalancer
  438. ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
  439. annotations: {}
  440. env:
  441. JAVA_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  442. ingress:
  443. enabled: false
  444. host: "dolphinscheduler.org"
  445. path: "/dolphinscheduler"
  446. annotations: {}
  447. tls:
  448. enabled: false
  449. secretName: "dolphinscheduler-tls"