#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

set -e

echo "init env variables"
# Define default values for the parameters below; a variable already set in the
# environment keeps its value (${VAR:-default}).
#============================================================================
# Database
#============================================================================
export DATABASE_TYPE=${DATABASE_TYPE:-"postgresql"}
export DATABASE_DRIVER=${DATABASE_DRIVER:-"org.postgresql.Driver"}
export DATABASE_HOST=${DATABASE_HOST:-"127.0.0.1"}
export DATABASE_PORT=${DATABASE_PORT:-"5432"}
export DATABASE_USERNAME=${DATABASE_USERNAME:-"root"}
export DATABASE_PASSWORD=${DATABASE_PASSWORD:-"root"}
export DATABASE_DATABASE=${DATABASE_DATABASE:-"dolphinscheduler"}
export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
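
# Example override (illustrative values, not defaults shipped here): to use an external
# MySQL instead of the PostgreSQL defaults above, export DATABASE_TYPE=mysql,
# DATABASE_DRIVER=com.mysql.jdbc.Driver, DATABASE_HOST=mysql.example.com and
# DATABASE_PORT=3306 before this script runs.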
#============================================================================
# Registry
#============================================================================
export REGISTRY_PLUGIN_NAME=${REGISTRY_PLUGIN_NAME:-"zookeeper"}
export REGISTRY_SERVERS=${REGISTRY_SERVERS:-"127.0.0.1:2181"}
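
# REGISTRY_SERVERS takes a comma-separated host:port list; for a three-node ZooKeeper
# quorum it would look like "zk1:2181,zk2:2181,zk3:2181" (illustrative hostnames).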
#============================================================================
# Common
#============================================================================
# common opts
export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""}
# common env
export DATA_BASEDIR_PATH=${DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"HDFS"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/dolphinscheduler"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"file:///"}
export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"}
export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"}
export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"}
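
# Sketch for S3-compatible resource storage (illustrative values): set
# RESOURCE_STORAGE_TYPE=S3, point FS_DEFAULT_FS at the bucket (e.g. s3a://dolphinscheduler),
# and replace the FS_S3A_* endpoint and credential placeholders above with real values.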
export HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE=${HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE:-"false"}
export JAVA_SECURITY_KRB5_CONF_PATH=${JAVA_SECURITY_KRB5_CONF_PATH:-"/opt/krb5.conf"}
export LOGIN_USER_KEYTAB_USERNAME=${LOGIN_USER_KEYTAB_USERNAME:-"hdfs@HADOOP.COM"}
export LOGIN_USER_KEYTAB_PATH=${LOGIN_USER_KEYTAB_PATH:-"/opt/hdfs.keytab"}
export KERBEROS_EXPIRE_TIME=${KERBEROS_EXPIRE_TIME:-"2"}
export HDFS_ROOT_USER=${HDFS_ROOT_USER:-"hdfs"}
export RESOURCE_MANAGER_HTTPADDRESS_PORT=${RESOURCE_MANAGER_HTTPADDRESS_PORT:-"8088"}
export YARN_RESOURCEMANAGER_HA_RM_IDS=${YARN_RESOURCEMANAGER_HA_RM_IDS:-""}
export YARN_APPLICATION_STATUS_ADDRESS=${YARN_APPLICATION_STATUS_ADDRESS:-"http://ds1:%s/ws/v1/cluster/apps/%s"}
export YARN_JOB_HISTORY_STATUS_ADDRESS=${YARN_JOB_HISTORY_STATUS_ADDRESS:-"http://ds1:19888/ws/v1/history/mapreduce/jobs/%s"}
export DATASOURCE_ENCRYPTION_ENABLE=${DATASOURCE_ENCRYPTION_ENABLE:-"false"}
export DATASOURCE_ENCRYPTION_SALT=${DATASOURCE_ENCRYPTION_SALT:-"!@#$%^&*"}
export SUDO_ENABLE=${SUDO_ENABLE:-"true"}
# dolphinscheduler env
export HADOOP_HOME=${HADOOP_HOME:-"/opt/soft/hadoop"}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/opt/soft/hadoop/etc/hadoop"}
export SPARK_HOME1=${SPARK_HOME1:-"/opt/soft/spark1"}
export SPARK_HOME2=${SPARK_HOME2:-"/opt/soft/spark2"}
export PYTHON_HOME=${PYTHON_HOME:-"/usr/bin/python"}
export JAVA_HOME=${JAVA_HOME:-"/usr/local/openjdk-8"}
export HIVE_HOME=${HIVE_HOME:-"/opt/soft/hive"}
export FLINK_HOME=${FLINK_HOME:-"/opt/soft/flink"}
export DATAX_HOME=${DATAX_HOME:-"/opt/soft/datax"}
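
# Assumption about the template contents: the *_HOME paths above are substituted into
# conf/env/dolphinscheduler_env.sh (rendered at the end of this script) so that
# Spark/Flink/DataX/Hive tasks can locate their client installations.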
#============================================================================
# Master Server
#============================================================================
export MASTER_SERVER_OPTS=${MASTER_SERVER_OPTS:-"-Xms1g -Xmx1g -Xmn512m"}
export MASTER_EXEC_THREADS=${MASTER_EXEC_THREADS:-"100"}
export MASTER_EXEC_TASK_NUM=${MASTER_EXEC_TASK_NUM:-"20"}
export MASTER_DISPATCH_TASK_NUM=${MASTER_DISPATCH_TASK_NUM:-"3"}
export MASTER_HOST_SELECTOR=${MASTER_HOST_SELECTOR:-"LowerWeight"}
export MASTER_HEARTBEAT_INTERVAL=${MASTER_HEARTBEAT_INTERVAL:-"10"}
export MASTER_TASK_COMMIT_RETRYTIMES=${MASTER_TASK_COMMIT_RETRYTIMES:-"5"}
export MASTER_TASK_COMMIT_INTERVAL=${MASTER_TASK_COMMIT_INTERVAL:-"1000"}
export MASTER_MAX_CPULOAD_AVG=${MASTER_MAX_CPULOAD_AVG:-"-1"}
export MASTER_RESERVED_MEMORY=${MASTER_RESERVED_MEMORY:-"0.3"}
#============================================================================
# Worker Server
#============================================================================
export WORKER_SERVER_OPTS=${WORKER_SERVER_OPTS:-"-Xms1g -Xmx1g -Xmn512m"}
export WORKER_EXEC_THREADS=${WORKER_EXEC_THREADS:-"100"}
export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"}
export WORKER_HOST_WEIGHT=${WORKER_HOST_WEIGHT:-"100"}
export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"-1"}
export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.3"}
export WORKER_GROUPS=${WORKER_GROUPS:-"default"}
export ALERT_LISTEN_HOST=${ALERT_LISTEN_HOST:-"localhost"}
#============================================================================
# Alert Server
#============================================================================
export ALERT_SERVER_OPTS=${ALERT_SERVER_OPTS:-"-Xms512m -Xmx512m -Xmn256m"}
#============================================================================
# Api Server
#============================================================================
export API_SERVER_OPTS=${API_SERVER_OPTS:-"-Xms512m -Xmx512m -Xmn256m"}
#============================================================================
# Logger Server
#============================================================================
export LOGGER_SERVER_OPTS=${LOGGER_SERVER_OPTS:-"-Xms512m -Xmx512m -Xmn256m"}

echo "generate dolphinscheduler config"
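# Render every *.tpl file under ${DOLPHINSCHEDULER_HOME}/conf/: each template is piped
# through an eval'd here-document so that ${VAR} placeholders are expanded with the
# values exported above, and the result is written alongside the template with the
# trailing .tpl stripped (foo.properties.tpl -> foo.properties).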
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done
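
# For example (illustrative template line, not the actual shipped content):
#   spring.datasource.url=jdbc:postgresql://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}?${DATABASE_PARAMS}
# would be rendered with the defaults above as
#   spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler?characterEncoding=utf8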
# generate dolphinscheduler_env.sh from its template in the same way, then make it executable
DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_HOME}/conf/env/dolphinscheduler_env.sh
if [ -r "${DOLPHINSCHEDULER_ENV_PATH}.tpl" ]; then
eval "cat << EOF
$(cat ${DOLPHINSCHEDULER_ENV_PATH}.tpl)
EOF
" > ${DOLPHINSCHEDULER_ENV_PATH}
chmod +x ${DOLPHINSCHEDULER_ENV_PATH}
fi