<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- the logical name for this new nameservice -->
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<!-- unique identifiers for each NameNode in the nameservice -->
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<!-- the fully-qualified RPC address for each NameNode to listen on -->
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>bigdatastorm:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>bigdataspark:8020</value>
</property>
<!-- the fully-qualified HTTP address for each NameNode to listen on -->
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>bigdatastorm:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>bigdataspark:50070</value>
</property>
<!-- the Java class that HDFS clients use to contact the Active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- the URI which identifies the group of JNs where the NameNodes will write/read edits -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://bigdatastorm:8485;bigdataspark:8485;bigdatacloud:8485/mycluster</value>
</property>
<!-- a list of scripts or Java classes which will be used to fence the Active NameNode during a failover -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_dsa</value>
</property>
<!-- the path where the JournalNode daemon will store its local state -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/hadoop-2.5.1/data</value>
</property>
<!-- enable automatic failover via ZKFC; NOTE(review): this also requires a
     ZooKeeper quorum (ha.zookeeper.quorum) configured in core-site.xml — verify it is set -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
</configuration>