[root@c02b01 conf]# cat log4j.properties
# Define some default values that can be overridden by system properties
hadoop.root.logger=DEBUG,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
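# Note: this file raises the root level to DEBUG; the stock Hadoop default is
# INFO,console. The value can still be overridden per process at launch, e.g.
#   HADOOP_ROOT_LOGGER="INFO,console" hadoop fs -ls /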
#
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file rolled daily:
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
# Logging Threshold
log4j.threshold=ALL
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
#
# TaskLog Appender
#
#Default values
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
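# The taskid/isCleanup defaults above are placeholders; in practice the
# TaskTracker supplies the real per-attempt values to each child JVM as
# -D system properties.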
#
#Security audit appender
#
hadoop.security.log.file=SecurityAuth.audit
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#new logger
log4j.logger.SecurityLogger=OFF,console
log4j.additivity.SecurityLogger=false
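# Note: level OFF disables SecurityLogger output entirely, so the DRFAS
# appender defined above is currently unused. Switch to
# log4j.logger.SecurityLogger=INFO,DRFAS to capture the security audit log.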
#
# Rolling File Appender
#
#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Logfile size and 30-day backups
#log4j.appender.RFA.MaxFileSize=1MB
#log4j.appender.RFA.MaxBackupIndex=30
#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# FSNamesystem Audit logging
# All audit events are logged at INFO level
#
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
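# Note: audit events are emitted at INFO, so the WARN threshold above
# effectively silences the HDFS audit log; set it back to INFO to re-enable.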
# Custom Logging levels
# must be defined here or passed as a -D system property, otherwise the
# metrics2 logger level below resolves to an empty string
hadoop.metrics.log.level=DEBUG
#log4j.logger.org.apache.hadoop=DEBUG
log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
log4j.logger.org.apache.hadoop.mapred.JobInProgress=DEBUG
#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
# Jets3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
#
# Null Appender
# Trap security logger on the hadoop client side
#
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
#
# Job Summary Appender
#
log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.appender.JSA.DatePattern=.yyyy-MM-dd
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
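# Note: hadoop.mapreduce.jobsummary.logger defaults to ${hadoop.root.logger}
# near the top of this file, so the JSA appender above is unused until that
# property is overridden (e.g. to INFO,JSA); job summaries currently follow
# the root logger to the console.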
# JIP
#log4j.logger.jipLogger=INFO,JIP
#log4j.appender.JIP=org.apache.log4j.FileAppender
#log4j.appender.JIP.File=${hadoop.log.dir}/JIP/jip.log
#log4j.appender.JIP.layout=org.apache.log4j.PatternLayout
#log4j.appender.JIP.layout.ConversionPattern=%d %p [%c] - %m%n
#log4j.additivity.jipLogger=false
# heartbeat
log4j.logger.heartbeatLogger=INFO,HEARTBEAT
log4j.appender.HEARTBEAT=org.apache.log4j.FileAppender
log4j.appender.HEARTBEAT.File=${hadoop.log.dir}/HEARTBEAT/heart.log
log4j.appender.HEARTBEAT.layout=org.apache.log4j.PatternLayout
log4j.appender.HEARTBEAT.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.additivity.heartbeatLogger=false
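# Make sure the ${hadoop.log.dir}/HEARTBEAT directory exists before startup;
# some log4j 1.2 releases do not create missing parent directories, and the
# tsr.log appender below writes to the same directory.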
# JobQueueTaskScheduler
log4j.logger.jQTLogger=INFO,TSR
log4j.appender.TSR=org.apache.log4j.FileAppender
log4j.appender.TSR.File=${hadoop.log.dir}/HEARTBEAT/tsr.log
log4j.appender.TSR.layout=org.apache.log4j.PatternLayout
log4j.appender.TSR.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.additivity.jQTLogger=false
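
The heartbeatLogger and jQTLogger entries above only take effect if the Hadoop source has been patched to log through loggers with those exact names. A minimal sketch of that instrumentation using the plain log4j 1.2 API follows; the class name and messages are illustrative assumptions, not the actual JobTracker patch:

import org.apache.log4j.Logger;

public class HeartbeatLogDemo {
    // The name must match log4j.logger.heartbeatLogger=INFO,HEARTBEAT exactly.
    private static final Logger HEARTBEAT_LOG = Logger.getLogger("heartbeatLogger");

    public static void main(String[] args) {
        // With additivity=false this goes only to
        // ${hadoop.log.dir}/HEARTBEAT/heart.log, not to the root appenders.
        HEARTBEAT_LOG.info("received heartbeat from a tasktracker");
        // DEBUG is below the logger's INFO threshold, so this call is dropped.
        HEARTBEAT_LOG.debug("raw heartbeat payload");
    }
}

Run with log4j 1.2 on the classpath from the directory hadoop.log.dir points at (here "."), and heart.log appears under HEARTBEAT/.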