"Fossies" - the Fresh Open Source Software Archive 
Member "apache-log4j-2.12.4-src/log4j-1.2-api/src/test/resources/config-1.2/hadoop/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties" (20 Dec 2021, 13645 Bytes) of package /linux/misc/apache-log4j-2.12.4-src.tar.gz:
As a special service "Fossies" has tried to format the requested text file into HTML format (style:
standard) with prefixed line numbers.
Alternatively you can here
view or
download the uninterpreted source code file.
See also the last
Fossies "Diffs" side-by-side code changes report for "log4j.properties":
2.17.1_vs_2.17.2-rc1.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=target
hadoop.log.file=hadoop.log

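# For example, the Hadoop launcher scripts typically override these with JVM
# options at daemon startup (illustrative values):
#   -Dhadoop.root.logger=DEBUG,console -Dhadoop.log.dir=/var/log/hadoop
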
# Define the root logger via the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter

# Logging Threshold
log4j.threshold=ALL

# Null Appender
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

#
# Rolling File Appender - cap space usage at 5 GB.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}

log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
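# Worst case this retains maxbackupindex x maxfilesize = 20 x 256 MB = 5 GB
# of rotated logs, plus the active file.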

log4j.appender.RFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n


#
# Daily Rolling File Appender
#

log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

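# The DatePattern controls the rollover interval; for example, .yyyy-MM-dd-HH
# would roll hourly and .yyyy-MM monthly.
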
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n


#
# console
# Add "console" to the root logger above if you want to use this
#

log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

#
# TaskLog Appender
#

# Default values
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12

log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
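# taskId and isCleanup are normally filled in per task by the MapReduce
# runtime through JVM system properties, e.g. (illustrative):
#   -Dhadoop.tasklog.taskid=<task attempt id> -Dhadoop.tasklog.iscleanup=false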

#
# HDFS block state change log from block manager
#
# Uncomment the following to log normal block state change
# messages from BlockManager in NameNode.
#log4j.logger.BlockStateChange=DEBUG

#
# Security appender
#
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
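# For example, to route security audit events to the appender above, override:
#hadoop.security.logger=INFO,RFAS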

#
# Daily Rolling Security appender
#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
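# Or roll the security log daily instead of by size, e.g.:
#hadoop.security.logger=INFO,DRFAS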

#
# hadoop configuration logging
#

# Uncomment the following line to turn off configuration deprecation warnings.
# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

#
# hdfs audit logging
#
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
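# For example, to write the HDFS audit trail to hdfs-audit.log, override:
#hdfs.audit.logger=INFO,RFAAUDIT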

#
# NameNode metrics logging.
# The default is to retain two namenode-metrics.log files up to 64MB each.
#
namenode.metrics.logger=INFO,NullAppender
log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
log4j.additivity.NameNodeMetricsLog=false
log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
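# For example, to enable NameNode metrics logging to the file above, override:
#namenode.metrics.logger=INFO,NNMETRICSRFA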

#
# DataNode metrics logging.
# The default is to retain two datanode-metrics.log files up to 64MB each.
#
datanode.metrics.logger=INFO,NullAppender
log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
log4j.additivity.DataNodeMetricsLog=false
log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
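# For example, to enable DataNode metrics logging to the file above, override:
#datanode.metrics.logger=INFO,DNMETRICSRFA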

#
# mapred audit logging
#
mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
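# For example, to write the MapReduce audit trail to mapred-audit.log, override:
#mapred.audit.logger=INFO,MRAUDIT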

# Custom Logging levels

#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG

# JetS3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR

# AWS SDK & S3A FileSystem
log4j.logger.com.amazonaws=ERROR
log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN

#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter

#
# Job Summary Appender
#
# Use the following logger to send summaries to a separate file defined by
# hadoop.mapreduce.jobsummary.log.file:
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

#
# Shuffle connection log from ShuffleHandler
# Uncomment the following line to enable logging of shuffle connections
# log4j.logger.org.apache.hadoop.mapred.ShuffleHandler.audit=DEBUG

#
# YARN ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY

# To enable AppSummaryLogging for the RM,
# set yarn.server.resourcemanager.appsummary.logger to
# <LEVEL>,RMSUMMARY in hadoop-env.sh

# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)

log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
log4j.appender.RMSUMMARY.MaxFileSize=256MB
log4j.appender.RMSUMMARY.MaxBackupIndex=20
log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

# History Server (HS) audit log configs
#mapreduce.hs.audit.logger=INFO,HSAUDIT
#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd

# HTTP Server Request Logs
#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
#log4j.appender.namenoderequestlog.RetainDays=3

#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
#log4j.appender.datanoderequestlog.RetainDays=3

#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
#log4j.appender.resourcemanagerrequestlog.RetainDays=3

#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
#log4j.appender.jobhistoryrequestlog.RetainDays=3

#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
#log4j.appender.nodemanagerrequestlog.RetainDays=3


# WebHdfs request log on datanodes
# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
# direct the log to a separate file.
#datanode.webhdfs.logger=INFO,console
#log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
#log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
#log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
#log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
#log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd


# Appender for collecting summary information about errors and warnings
yarn.ewma.cleanupInterval=300
yarn.ewma.messageAgeLimitSeconds=86400
yarn.ewma.maxUniqueMessages=250
log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
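# Not attached to any logger by default; for example, a YARN daemon could
# include it in its root logger (illustrative value):
#hadoop.root.logger=INFO,EWMA,RFA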