0. The HDFS replica placement policy
With the default placement policy in Hadoop 2.x, the first replica is stored on the node where the writer runs (or a random node if the client is outside the cluster), the second replica on a node in a different (remote) rack, and the third replica on a different node in that same remote rack. The NameNode can only make these rack-local versus rack-remote decisions if it knows which rack each DataNode belongs to, which is what the rack-awareness mapping configured below provides (step 4 shows how to read the resulting rack paths back from a file's block locations).
1. Write the rack-awareness Java class
package com.cr.rackAware;

import org.apache.hadoop.net.DNSToSwitchMapping;

import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class MyRackAware implements DNSToSwitchMapping {

    @Override
    public List<String> resolve(List<String> names) {
        List<String> list = new ArrayList<String>();
        try {
            // Append mode, so repeated resolve() calls don't overwrite earlier output
            FileWriter fw = new FileWriter("/home/xiaoqiu/rackaware.txt", true);
            for (String str : names) {
                // Log the raw input (IP address or hostname) the NameNode asks us to resolve
                fw.write(str);
                fw.write("\n");
                if (str.startsWith("192")) {
                    // IP address such as 192.168.0.150: take the last octet
                    String ip = str.substring(str.lastIndexOf(".") + 1);
                    if (Integer.parseInt(ip) == 150) {
                        list.add("/rack1/" + ip);
                    } else {
                        list.add("/rack2/" + ip);
                    }
                } else if (str.startsWith("s")) {
                    // Hostname such as s150: strip the leading "s"
                    String ip = str.substring(1);
                    if (Integer.parseInt(ip) == 150) {
                        list.add("/rack1/" + ip);
                    } else {
                        list.add("/rack2/" + ip);
                    }
                }
            }
            fw.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return list;
    }

    @Override
    public void reloadCachedMappings() {
    }

    @Override
    public void reloadCachedMappings(List<String> list) {
    }
}
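Before wiring the class into Hadoop, it is easy to sanity-check the mapping logic by calling resolve() directly. Below is a minimal, hypothetical test driver; the sample hostnames and IP addresses are assumptions modelled on the s150/s151/s152 cluster used in this walkthrough:

package com.cr.rackAware;

import java.util.Arrays;
import java.util.List;

public class MyRackAwareTest {
    public static void main(String[] args) {
        MyRackAware mapping = new MyRackAware();
        // resolve() accepts hostnames and IP addresses and returns one /rack/leaf path per input;
        // as a side effect it also appends the raw names to /home/xiaoqiu/rackaware.txt
        List<String> racks = mapping.resolve(
                Arrays.asList("s150", "s151", "192.168.0.150", "192.168.0.152"));
        System.out.println(racks); // expected: [/rack1/150, /rack2/151, /rack1/150, /rack2/152]
    }
}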
2. Configure core-site.xml and distribute it to every node
[xiaoqiu@s150 /soft/hadoop/etc/hadoop]$ nano core-site.xml
[xiaoqiu@s150 /soft/hadoop/etc/hadoop]$ cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://s150</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/xiaoqiu/hadoop_tmp</value>
</property>
<property>
<name>net.topology.node.switch.mapping.impl</name>
<value>com.cr.rackAware.MyRackAware</value>
</property>
</configuration>
[xiaoqiu@s150 /soft/hadoop/etc/hadoop]$ xrsync.sh core-site.xml
p=core-site.xml
dir=.
filename=core-site.xml
=====s151 /soft/hadoop-2.7.5/etc/fullcore-site.xml========
=====s152 /soft/hadoop-2.7.5/etc/fullcore-site.xml========
Check the file on every node:
[xiaoqiu@s150 /home/xiaoqiu]$ xcall.sh "cat /soft/hadoop/etc/hadoop/core-site.xml"
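Distributing core-site.xml is not enough on its own: the NameNode loads the mapping class by reflection at startup, so the compiled MyRackAware class (packaged as a jar, e.g. dropped into $HADOOP_HOME/share/hadoop/common/lib on every node) must also be on its classpath, or the NameNode will fail to start. A minimal sketch to verify that the configuration resolves to a loadable class, assuming the jar and the Hadoop libraries are on the local classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.util.ReflectionUtils;

import java.util.Arrays;

public class CheckMapping {
    public static void main(String[] args) {
        // Picks up core-site.xml from the classpath, as the NameNode does
        Configuration conf = new Configuration();
        Class<? extends DNSToSwitchMapping> cls = conf.getClass(
                "net.topology.node.switch.mapping.impl", null, DNSToSwitchMapping.class);
        // cls is null if the property is missing; otherwise instantiate it
        // by reflection, roughly the way Hadoop itself does
        DNSToSwitchMapping mapping = ReflectionUtils.newInstance(cls, conf);
        System.out.println(mapping.resolve(Arrays.asList("s151", "192.168.0.150")));
    }
}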
3. Restart the NameNode
[xiaoqiu@s150 /home/xiaoqiu]$ hadoop-daemon.sh stop namenode
stopping namenode
[xiaoqiu@s150 /home/xiaoqiu]$ hadoop-daemon.sh start namenode
4. From node s151, upload a file to HDFS and observe where the replicas are placed
[xiaoqiu@s151 /home/xiaoqiu]$ hadoop fs -put 1.txt /tmp
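The rack each replica landed on can then be read back through the FileSystem API: BlockLocation#getTopologyPaths() returns the /rack/host path of every replica of every block. A minimal sketch, assuming the upload above created /tmp/1.txt:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.util.Arrays;

public class ShowReplicaRacks {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(new Path("/tmp/1.txt"));
        // One BlockLocation per block; getTopologyPaths() lists /rack/host for each replica
        for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
            System.out.println(Arrays.toString(loc.getTopologyPaths()));
        }
        fs.close();
    }
}

The same rack-to-DataNode view is available from the command line with hdfs dfsadmin -printTopology, and the raw names passed to resolve() can be checked in /home/xiaoqiu/rackaware.txt on the NameNode host.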