/* To avoid timeouts when processing very large jobs, raise the I/O timeouts
 * (here 6000000 ms, i.e. 100 minutes) in hdfs-site.xml:
 *
 * <property>
 *     <name>dfs.datanode.socket.write.timeout</name>
 *     <value>6000000</value>
 * </property>
 * <property>
 *     <!-- dfs.socket.timeout is deprecated; use dfs.client.socket-timeout instead -->
 *     <name>dfs.socket.timeout</name>
 *     <value>6000000</value>
 * </property>
 */
import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
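// Don't try to add/replace a datanode when one in the write pipeline fails;
// avoids "Failed to replace a bad datanode on the existing pipeline" errors
// on clusters with only a few datanodes.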
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
conf.set("dfs.datanode.socket.write.timeout", "7200000");
//Configuration.deprecation: dfs.socket.timeout is deprecated. Instead, use dfs.client.socket-timeout
conf.set("dfs.client.socket-timeout", "7200000");
// Default 0.70: fraction of the reducer's JVM heap used to buffer map outputs during the shuffle
conf.set("mapreduce.reduce.shuffle.input.buffer.percent", "0.6");
// Default 0.25: maximum fraction of that shuffle buffer a single map output may consume;
// larger outputs are spilled straight to disk
conf.set("mapreduce.reduce.shuffle.memory.limit.percent", "0.10");
// For very large files, raise the minimum split size from the default block size (128 MB) to 256 MB.
// The old mapred.min.split.size property is deprecated; use the new-API property
// mapreduce.input.fileinputformat.split.minsize or FileInputFormat.setMinInputSplitSize(job, 268435456L)
conf.set("mapreduce.input.fileinputformat.split.minsize", "268435456");
// Likewise raise the maximum split size from the default 128 MB to 512 MB.
// The old mapred.max.split.size property is deprecated; use the new-API property
// mapreduce.input.fileinputformat.split.maxsize or FileInputFormat.setMaxInputSplitSize(job, 536870912L)
conf.set("mapreduce.input.fileinputformat.split.maxsize", "536870912");
// Container memory per map task; default 1024 MB
conf.set("mapreduce.map.memory.mb", "5120");
// Container memory per reduce task; default 1024 MB
conf.set("mapreduce.reduce.memory.mb", "5120");
/*
 * Heap size for the map/reduce child JVMs. If a single map processes a large
 * volume of data and fails with "GC overhead limit exceeded", raise the heap
 * via the *.java.opts settings below. Keep -Xmx below the container size
 * (*.memory.mb) so the container retains headroom for non-heap memory;
 * roughly 80% of the container is the usual ratio.
 */
// Max heap for the map child JVM (~80% of the 5120 MB container)
conf.set("mapreduce.map.java.opts", "-Xmx4096m");
// Max heap for the reduce child JVM
conf.set("mapreduce.reduce.java.opts", "-Xmx4096m");