• 伪分布式&&完全分布式&&高可用(zookeeper)的配置文件内容


    【伪分布式】

    ①[core-site.xml]

    <configuration>
      <!-- Pseudo-distributed: the default filesystem is an HDFS NameNode on localhost -->
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost/</value>
      </property>
    </configuration>

    ②[mapred-site.xml]

    <configuration>
      <!-- Run MapReduce jobs on YARN rather than the local runner -->
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
    </configuration>

    ③yarn-site.xml

    <configuration>
      <!-- Single-node YARN: ResourceManager runs on this host -->
      <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>localhost</value>
      </property>
      <!-- Auxiliary shuffle service required by MapReduce -->
      <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
      </property>
    </configuration>

    ④hdfs-site.xml

    <configuration>
      <!-- One DataNode only, so keep a single block replica -->
      <property>
        <name>dfs.replication</name>
        <value>1</value>
      </property>
    </configuration>

    ⑤slaves

    localhost

    [完全分布式:]

    ①core-site.xml

    <configuration>
      <!-- Fully distributed: NameNode runs on host s100 -->
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://s100/</value>
      </property>
      <!-- Base directory that the ${hadoop.tmp.dir} references below expand to -->
      <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/neworigin/hadoop</value>
      </property>
    </configuration>

    ②hdfs-site.xml

    <configuration>
      <!-- Three replicas across the cluster; writes succeed once one replica lands -->
      <property>
        <name>dfs.replication</name>
        <value>3</value>
      </property>
      <property>
        <name>dfs.replication.min</name>
        <value>1</value>
      </property>
      <!-- NameNode metadata mirrored into two local directories -->
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file://${hadoop.tmp.dir}/hdfs/name1,file://${hadoop.tmp.dir}/hdfs/name2</value>
      </property>
      <!-- DataNode block storage spread over two local directories -->
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file://${hadoop.tmp.dir}/hdfs/data1,file://${hadoop.tmp.dir}/hdfs/data2</value>
      </property>
      <!-- Secondary NameNode checkpoint location -->
      <property>
        <name>dfs.namenode.checkpoint.dir</name>
        <value>file://${hadoop.tmp.dir}/hdfs/namesecondary</value>
      </property>
      <!-- DataNode web UI bound on all interfaces -->
      <property>
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:50075</value>
      </property>
    </configuration>

    ③yarn-site.xml

    <configuration>
      <!-- Site specific YARN configuration properties -->
      <!-- ResourceManager lives on s100 -->
      <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>s100</value>
      </property>
      <!-- NodeManager scratch space for container data -->
      <property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>file://${hadoop.tmp.dir}/nm-local-dir</value>
      </property>
      <!-- Shuffle service needed by MapReduce jobs -->
      <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
      </property>
      <!-- Resources this NodeManager offers: 16 GB RAM, 16 vcores -->
      <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>16384</value>
      </property>
      <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>16</value>
      </property>
    </configuration>

    ④mapred-site.xml

    <configuration>
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
      <!-- NOTE(review): the two entries below use legacy MRv1 (JobTracker/TaskTracker)
           property names; presumably kept for old web-UI ports — confirm they are
           still read when running on YARN -->
      <property>
        <name>mapred.jobtracker.http.address</name>
        <value>0.0.0.0:50030</value>
      </property>
      <property>
        <name>mapred.tasktracker.http.address</name>
        <value>0.0.0.0:50060</value>
      </property>
    </configuration>

    ⑤slaves

    s100
    s101
    s102

    start-all.sh

     [zookeeper]

    ①yarn-site.xml

    <configuration>
      <!-- Site specific YARN configuration properties -->
      <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>s100</value>
      </property>
      <property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>file://${hadoop.tmp.dir}/nm-local-dir</value>
      </property>
      <!-- Shuffle service for MapReduce; declared once (the original listed this
           property twice with the same value) -->
      <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
      </property>
      <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>16384</value>
      </property>
      <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>16</value>
      </property>
      <!-- ResourceManager high availability, coordinated through ZooKeeper -->
      <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
      </property>
      <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>cluster1</value>
      </property>
      <!-- Logical IDs of the two RMs and the hosts they run on -->
      <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
      </property>
      <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>s102</value>
      </property>
      <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>s103</value>
      </property>
      <!-- ZooKeeper ensemble used for RM leader election / state -->
      <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>s100:2181,s101:2181,s102:2181</value>
      </property>
    </configuration>

    ②hdfs-site.xml

    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>3</value>
      </property>
      <property>
        <name>dfs.replication.min</name>
        <value>1</value>
      </property>
      <!-- Value restored: the original line was truncated at a terminal wrap
           ("...hd$"); reconstructed from the identical fully-distributed config -->
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file://${hadoop.tmp.dir}/hdfs/name1,file://${hadoop.tmp.dir}/hdfs/name2</value>
      </property>
      <!-- Value restored from truncation, same as above -->
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file://${hadoop.tmp.dir}/hdfs/data1,file://${hadoop.tmp.dir}/hdfs/data2</value>
      </property>
      <property>
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:50075</value>
      </property>
      <!-- HA nameservice "neworigin" with two NameNodes, nn1/nn2 -->
      <property>
        <name>dfs.nameservices</name>
        <value>neworigin</value>
      </property>
      <property>
        <name>dfs.ha.namenodes.neworigin</name>
        <value>nn1,nn2</value>
      </property>
      <property>
        <name>dfs.namenode.rpc-address.neworigin.nn1</name>
        <value>s100:9000</value>
      </property>
      <property>
        <name>dfs.namenode.rpc-address.neworigin.nn2</name>
        <value>s101:9000</value>
      </property>
      <property>
        <name>dfs.namenode.http-address.neworigin.nn1</name>
        <value>s100:50070</value>
      </property>
      <property>
        <name>dfs.namenode.http-address.neworigin.nn2</name>
        <value>s101:50070</value>
      </property>
      <!-- Shared edit log stored on the JournalNode quorum -->
      <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://s101:8485;s102:8485;s103:8485/neworigin</value>
      </property>
      <!-- Class name restored: the original line was truncated ("...Pro$");
           this is the standard HDFS HA client failover proxy provider -->
      <property>
        <name>dfs.client.failover.proxy.provider.neworigin</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
      </property>
      <!-- Fence the old active NameNode over SSH during failover -->
      <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence</value>
      </property>
      <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/home/neworigin/.ssh/id_rsa</value>
      </property>
      <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/home/neworigin/journal/data</value>
      </property>
      <!-- Let ZKFC perform automatic failover -->
      <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
      </property>
    </configuration>

    ③core-site.xml

    <configuration>
      <!-- Clients address the HA nameservice, not a single NameNode host -->
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://neworigin</value>
      </property>
      <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/neworigin/hadoop</value>
      </property>
      <!-- ZooKeeper ensemble used by the ZKFailoverController -->
      <property>
        <name>ha.zookeeper.quorum</name>
        <value>s100:2181,s101:2181,s102:2181</value>
      </property>
    </configuration>

    ④mapred-site.xml

    <configuration>
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
      <!-- NOTE(review): legacy MRv1 (JobTracker/TaskTracker) property names below;
           verify they have any effect when the framework is YARN -->
      <property>
        <name>mapred.jobtracker.http.address</name>
        <value>0.0.0.0:50030</value>
      </property>
      <property>
        <name>mapred.tasktracker.http.address</name>
        <value>0.0.0.0:50060</value>
      </property>
    </configuration>

  • 相关阅读:
    NetBeans IDE 配置XDebug
    XAMPP环境下用phpStorm+XDebug进行断点调试的配置
    vim最通用编辑器命令
    MySQL全文索引应用简明教程
    完美解决 nginx No input file specified.
    htmlspecialchars()和htmlspecialchars_decode()
    中文CentOS 6.5网络配置及设置IP地址
    简单楼层导航代码
    linux重启nginx后出现nginx的pid丢失的解决方案
    nginx启动、关闭、重启
  • 原文地址:https://www.cnblogs.com/chengdonghui/p/7827382.html
Copyright © 2020-2023  润新知