commit 52ea6578a0 by zk (1 year ago)

BIN
lib/DmJdbcDriver18.jar


+ 21 - 0
pom.xml

@@ -34,6 +34,27 @@
             <artifactId>spring-boot-starter-web</artifactId>
         </dependency>
 
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>3.1.4</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-hdfs</artifactId>
+            <version>3.1.4</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+            <version>3.1.4</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.12</version>
+        </dependency>
+
     </dependencies>
 
     <build>

+ 94 - 0
src/main/java/com/citygis/service/BigDataService.java

@@ -1,5 +1,12 @@
 package com.citygis.service;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.Test;
 
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
 import java.sql.*;
 
 /**
@@ -26,4 +33,91 @@ public class BigDataService {
         ps.close();
         conn.close();
     }
+
+    @Test
+    public void runTest() throws IOException, ClassNotFoundException, SQLException {
+        // Load the Hive JDBC driver and connect to the HiveServer2 endpoint
+        Class.forName("org.apache.hive.jdbc.HiveDriver");
+        String jdbcUrl = "jdbc:hive2://192.168.20.6:10000/tpcds_holodesk_2";
+        Connection conn = DriverManager.getConnection(jdbcUrl);
+        String sql = "select i_item_id,\n" +
+                "        avg(ss_quantity) agg1,\n" +
+                "        avg(ss_list_price) agg2,\n" +
+                "        avg(ss_coupon_amt) agg3,\n" +
+                "        avg(ss_sales_price) agg4\n" +
+                " from store_sales\n" +
+                "      JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk\n" +
+                "      JOIN customer_demographics ON store_sales.ss_cdemo_sk = customer_demographics.cd_demo_sk\n" +
+                "      JOIN promotion ON store_sales.ss_promo_sk = promotion.p_promo_sk\n" +
+                "      JOIN item ON store_sales.ss_item_sk = item.i_item_sk\n" +
+                " where\n" +
+                "       cd_gender = 'F' and\n" +
+                "       cd_marital_status = 'W' and\n" +
+                "       cd_education_status = 'Primary' and\n" +
+                "       (p_channel_email = 'N' or p_channel_event = 'N') and\n" +
+                "       d_year = 1998\n" +
+                " group by i_item_id\n" +
+                " order by i_item_id\n" +
+                " limit 100";
+        PreparedStatement ps = conn.prepareStatement(sql);
+        ResultSet rs = ps.executeQuery();
+        int num = rs.getMetaData().getColumnCount();
+        while (rs.next()) {
+            for (int i = 1; i <= num; i++) {
+                System.out.println("column " + i + ": " + rs.getObject(i));
+            }
+        }
+        rs.close();
+        ps.close();
+        conn.close();
+    }
+
+
+
+    @Test
+    public void runDMTest() throws IOException, ClassNotFoundException, SQLException {
+
+        // Connect to a DM (Dameng) database through the driver shipped as lib/DmJdbcDriver18.jar
+        Connection con;
+        String driver = "dm.jdbc.driver.DmDriver";
+        String url = "jdbc:dm://127.0.0.1:5236/test?zeroDateTimeBehavior=convertToNull&useUnicode=true&characterEncoding=utf-8";
+        String user = "SYSDBA";
+        String password = "SYSDBA";
+        try {
+            Class.forName(driver);
+            con = DriverManager.getConnection(url,user,password);
+            if(!con.isClosed())
+                System.out.println("Succeeded connecting to the Database!");
+
+            Statement statement = con.createStatement();
+            // SQL statement to execute
+            String sql = "select * from \"test\".\"test\"";
+            // ResultSet holds the rows returned by the query
+            ResultSet rs = statement.executeQuery(sql);
+            System.out.println("-----------------");
+            System.out.println("Query results:");
+            System.out.println("-----------------");
+            System.out.println("id" + "\t" + "name");
+            System.out.println("-----------------");
+
+            String id = null;
+            String name = null;
+            while(rs.next()){
+                id = rs.getString("id");
+                name = rs.getString("name");
+
+                System.out.println(id + "\t" + name);
+            }
+            rs.close();
+            statement.close();
+            con.close();
+            System.out.println("Database data retrieved successfully!");
+        } catch (ClassNotFoundException e) {
+            System.out.println("Sorry, can't find the DM JDBC driver!");
+            e.printStackTrace();
+        } catch (SQLException e) {
+            // Handle connection or query failures
+            e.printStackTrace();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
 }
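
A minimal sketch, not part of this commit: the same Hive query pattern written with try-with-resources, so the Connection, PreparedStatement and ResultSet are closed even when an exception is thrown. It assumes the Hive JDBC driver (org.apache.hive.jdbc.HiveDriver) is already on the classpath; that driver is not among the dependencies added to pom.xml above, so it presumably comes from elsewhere. The class and method names here are illustrative only.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class HiveQuerySketch {
    // Runs a query and prints every column of every returned row.
    public static void run(String jdbcUrl, String sql) throws SQLException {
        try (Connection conn = DriverManager.getConnection(jdbcUrl);
             PreparedStatement ps = conn.prepareStatement(sql);
             ResultSet rs = ps.executeQuery()) {
            int columns = rs.getMetaData().getColumnCount();
            while (rs.next()) {
                for (int i = 1; i <= columns; i++) {
                    System.out.println("column " + i + ": " + rs.getObject(i));
                }
            }
        }
    }
}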

+ 66 - 0
src/main/java/com/citygis/service/HadoopTest.java

@@ -0,0 +1,66 @@
+package com.citygis.service;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+public class HadoopTest {
+    // Client object for operating on the HDFS file system
+    FileSystem hdfs = null;
+
+    @Before
+    public void init() throws IOException {
+        // Build a configuration object; core-site.xml / hdfs-site.xml on the classpath are also picked up
+        Configuration conf = new Configuration();
+        // Point the default file system at the target HDFS cluster
+        conf.set("fs.defaultFS","hdfs://192.168.20.5:8020");
+        // Set the client identity to a user the Hadoop cluster accepts (here "hdfs")
+        System.setProperty("HADOOP_USER_NAME","hdfs");
+        // Obtain an HDFS client via FileSystem's static get() method
+        hdfs = FileSystem.get(conf);
+    }
+
+    @After
+    public void close() throws IOException {
+        // Close the file system client
+        hdfs.close();
+    }
+
+    @Test
+    public void testUploadFileToHDFS() throws IOException {
+        // Local (Windows) path of the file to upload
+        Path src = new Path("D:\\work\\工作文件\\疾控\\fileTest/test.xlsx");
+        // Destination path on HDFS
+        Path dst = new Path("/test/test.xlsx");
+        // Upload
+        hdfs.copyFromLocalFile(src, dst);
+        System.out.println("Upload succeeded");
+    }
+
+    @Test
+    public void testDownFileToLocal() throws IOException {
+        // HDFS source path and local destination path
+        String hdfsFilePath = "/test/test.xlsx";
+        String localFilePath = "D:\\work\\工作文件\\疾控\\fileTest/test1.xlsx";
+//        // Path on HDFS to download from
+//        Path src = new Path("/HDFSTest.txt");
+//        // Local (Windows) path to store the downloaded file
+//        Path dst = new Path("D:\\work\\工作文件\\疾控\\fileTest/HDFSTest1.txt");
+//        // Download via copyToLocalFile (alternative to the stream copy below)
+//        hdfs.copyToLocalFile(false,src,dst,true);
+        // Open an HDFS input stream and copy it to the local file;
+        // the final 'true' tells IOUtils.copyBytes to close both streams
+        FSDataInputStream open = hdfs.open(new Path(hdfsFilePath));
+        OutputStream output = new FileOutputStream(localFilePath);
+        IOUtils.copyBytes(open, output, 4096, true);
+        System.out.println("Download succeeded");
+    }
+}
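
As a follow-up sketch (again not part of the commit), the same FileSystem client can verify and clean up the uploaded test file; exists() and delete() are standard org.apache.hadoop.fs.FileSystem methods, and the path simply mirrors the one hard-coded in the tests above. The class and method names are hypothetical.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class HdfsCleanupSketch {
    // Removes the test file uploaded by testUploadFileToHDFS(), if it exists.
    public static void cleanup(FileSystem hdfs) throws IOException {
        Path uploaded = new Path("/test/test.xlsx");
        if (hdfs.exists(uploaded)) {
            // second argument = recursive; false is enough for a single file
            boolean deleted = hdfs.delete(uploaded, false);
            System.out.println("deleted " + uploaded + ": " + deleted);
        }
    }
}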

+ 115 - 0
src/main/resources/core-site.xml

@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://nameservice1</value>
+    </property>
+    <property>
+        <name>ha.zookeeper.quorum</name>
+        <value>tw-node3:2181,tw-node1:2181,tw-node2:2181</value>
+    </property>
+    <property>
+        <name>ha.zookeeper.parent-znode</name>
+        <value>/hdfs1-ha</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hdfs.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hdfs.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hbase.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hbase.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hive.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hive.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hue.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.hue.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.httpfs.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.httpfs.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.oozie.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.oozie.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.guardian.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.guardian.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.root.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.root.groups</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>net.topology.node.switch.mapping.impl</name>
+        <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+    </property>
+    <property>
+        <name>net.topology.script.file.name</name>
+        <value>/usr/lib/transwarp/scripts/rack_map.sh</value>
+    </property>
+    <property>
+        <name>transwarp.docker.network.hypervisor-subnetmask</name>
+        <value>255.255.255.255</value>
+    </property>
+    <property>
+        <name>transwarp.docker.network.interfaces</name>
+        <value>eth0</value>
+    </property>
+    <property>
+        <name>dfs.ha.fencing.methods</name>
+        <value>shell(/bin/true)</value>
+    </property>
+    <property>
+        <name>transwarp.docker.enable</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>transwarp.docker.network.policy</name>
+        <value>io.transwarp.docker.policies.DockerNetworkPolicyHostGW</value>
+    </property>
+    <property>
+        <name>fs.trash.interval</name>
+        <value>10080</value>
+    </property>
+</configuration>

+ 127 - 0
src/main/resources/hdfs-site.xml

@@ -0,0 +1,127 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<configuration>
+    <property>
+        <name>dfs.nameservices</name>
+        <value>nameservice1</value>
+    </property>
+    <property>
+        <name>dfs.ha.namenodes.nameservice1</name>
+        <value>nn1,nn2</value>
+    </property>
+    <property>
+        <name>dfs.namenode.rpc-address.nameservice1.nn1</name>
+        <value>tw-node3:8020</value>
+    </property>
+    <property>
+        <name>dfs.namenode.rpc-address.nameservice1.nn2</name>
+        <value>tw-node1:8020</value>
+    </property>
+    <property>
+        <name>dfs.namenode.http-address.nameservice1.nn1</name>
+        <value>tw-node3:50070</value>
+    </property>
+    <property>
+        <name>dfs.namenode.http-address.nameservice1.nn2</name>
+        <value>tw-node1:50070</value>
+    </property>
+    <property>
+        <name>dfs.ha.automatic-failover.enabled.nameservice1</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.namenode.shared.edits.dir.nameservice1</name>
+        <value>qjournal://tw-node3:8485;tw-node1:8485;tw-node2:8485/nameservice1</value>
+    </property>
+    <property>
+        <name>dfs.client.failover.proxy.provider.nameservice1</name>
+        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+    </property>
+    <property>
+        <name>dfs.journalnode.rpc-address</name>
+        <value>0.0.0.0:8485</value>
+    </property>
+    <property>
+        <name>dfs.journalnode.http-address</name>
+        <value>0.0.0.0:8480</value>
+    </property>
+    <property>
+        <name>dfs.datanode.address</name>
+        <value>0.0.0.0:50010</value>
+    </property>
+    <property>
+        <name>dfs.datanode.http.address</name>
+        <value>0.0.0.0:50075</value>
+    </property>
+    <property>
+        <name>dfs.datanode.ipc.address</name>
+        <value>0.0.0.0:50020</value>
+    </property>
+    <property>
+        <name>dfs.hosts.exclude</name>
+        <value>/etc/hdfs1/conf/exclude-list.txt</value>
+    </property>
+    <property>
+        <name>dfs.domain.socket.path</name>
+        <value>/var/run/hdfs1/dn_socket</value>
+    </property>
+    <property>
+        <name>license.zookeeper.quorum</name>
+        <value>tw-node3:2291,tw-node1:2291,tw-node2:2291</value>
+    </property>
+    <property>
+        <name>dfs.journalnode.edits.dir</name>
+        <value>/hadoop/journal</value>
+    </property>
+    <property>
+        <name>dfs.permissions.superusergroup</name>
+        <value>hbase</value>
+    </property>
+    <property>
+        <name>dfs.client.socket-timeout</name>
+        <value>120000</value>
+    </property>
+    <property>
+        <name>dfs.datanode.data.dir.perm</name>
+        <value>755</value>
+    </property>
+    <property>
+        <name>dfs.datanode.du.reserved</name>
+        <value>21464141824</value>
+    </property>
+    <property>
+        <name>dfs.namenode.handler.count</name>
+        <value>100</value>
+    </property>
+    <property>
+        <name>dfs.namenode.acls.enabled</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.data.transfer.protection</name>
+        <value>authentication</value>
+    </property>
+    <property>
+        <name>dfs.client.read.shortcircuit</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.namenode.name.dir</name>
+        <value>/vdir/hadoop/namenode_dir,/vdir/mnt/disk1/hadoop/namenode_dir</value>
+    </property>
+    <property>
+        <name>dfs.datanode.handler.count</name>
+        <value>30</value>
+    </property>
+    <property>
+        <name>dfs.datanode.failed.volumes.tolerated</name>
+        <value>0</value>
+    </property>
+    <property>
+        <name>dfs.datanode.data.dir</name>
+        <value>/vdir/mnt/disk1/hadoop/data</value>
+    </property>
+</configuration>
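
The two resource files above describe an HA HDFS setup (nameservice1 backed by NameNodes on tw-node3 and tw-node1), while HadoopTest.init() hard-codes a single NameNode address. A hedged sketch of the alternative: because Hadoop's Configuration picks up core-site.xml and hdfs-site.xml from the classpath, a bare Configuration can already resolve hdfs://nameservice1 through the configured failover proxy provider, assuming the tw-node* hostnames are resolvable from the client machine. The class name is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class HaClientSketch {
    public static void main(String[] args) throws IOException {
        System.setProperty("HADOOP_USER_NAME", "hdfs");
        // No explicit fs.defaultFS here: the classpath core-site.xml / hdfs-site.xml supply it
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
            System.out.println("/test exists: " + fs.exists(new Path("/test")));
        }
    }
}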

+ 212 - 0
src/main/resources/log4j.properties

@@ -0,0 +1,212 @@
+# Copyright 2011 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 1gb.
+#
+hadoop.log.maxfilesize=64MB
+hadoop.log.maxbackupindex=16
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2}: (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2}: (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=64MB
+hadoop.security.log.maxbackupindex=16
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=64MB
+hdfs.audit.log.maxbackupindex=16
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=64MB
+mapred.audit.log.maxbackupindex=16
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=64MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=16
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log
+#
+# Set the ResourceManager summary log filename
+#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.MaxFileSize=64MB
+#log4j.appender.RMSUMMARY.MaxBackupIndex=16
+#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n