• hadoop分布式安装


    1.hadoop-2.7.4.tar.gz跟jdk-8u121-linux-x64.tar.gz自行下载并解压,我全部放置在/usr/local/下并命名为hadoop与jdk1.8.0_121,当然只要是1.8的jdk都可以没必要完全一样

    2.所有机器做免密认证,并且对主机名写hosts

    1 [root@hadoop1 hadoop]# cat /etc/hosts
    2 127.0.0.1 localhost
    3 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    4 192.168.56.11 hadoop1
    5 192.168.56.12 hadoop2
    6 192.168.56.13 hadoop3

    3.配置以下两个环境变量,然后执行 source /etc/profile 使其生效

     1 [root@hadoop1 hadoop]# mkdir /usr/local/hadoop/tmp/    # 所有机器
     2 [root@hadoop1 hadoop]# tail /etc/profile
     3 ############# hadoop env #################
     4 export HADOOP_HOME=/usr/local/hadoop
     5 export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
     6 
     7 ############# jdk env #################
     8 export JAVA_HOME=/usr/local/jdk1.8.0_121
     9 export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
    10 export PATH=$JAVA_HOME/bin:$PATH

    4.配置hadoop的各配置文件,只在第一台机器上操作即可

     1 [root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml            # 指定hdfs的主
     2 <configuration>
     3     <property>
     4         <name>fs.defaultFS</name>
     5         <value>hdfs://hadoop1:9000</value>
     6     </property>
     7     <property>
     8         <name>hadoop.tmp.dir</name>
     9         <value>/usr/local/hadoop/tmp</value>
    10     </property>
    11 </configuration>
    12     
    13 [root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml            # 指定hdfs备份副本数量
    14 <configuration>
    15     <property>
    16         <name>dfs.replication</name>
    17         <value>3</value>
    18     </property>
    19 </configuration>
    20 [root@hadoop1 ~]# cp /usr/local/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/etc/hadoop/mapred-site.xml
    21 [root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/mapred-site.xml          # 指定mr运行在yarn上
    22 <configuration>
    23     <property>
    24         <name>mapreduce.framework.name</name>
    25         <value>yarn</value>
    26     </property>
    27 </configuration>
    28 
    29 [root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/yarn-site.xml            # 指定yarn的主
    30 <configuration>
    31 
    32 <!-- Site specific YARN configuration properties -->
    33     <property>
    34         <name>yarn.nodemanager.aux-services</name>
    35         <value>mapreduce_shuffle</value>
    36     </property>
    37     <property>
    38         <name>yarn.resourcemanager.hostname</name>
    39         <value>hadoop1</value>
    40     </property>
    41     <property>
    42         <name>yarn.nodemanager.resource.memory-mb</name>
    43         <value>1024</value>    # 占用的内存,默认是8g,不可以小于1024
    44     </property>
    45     <property>
    46         <name>yarn.nodemanager.resource.cpu-vcores</name>
    47         <value>1</value>       # 使用cpu的核数,物理机可以多分配
    48     </property>
    49 </configuration>
    50 
    51 [root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/slaves                   # 指定hadoop集群机器
    52 hadoop1
    53 hadoop2
    54 hadoop3
    55 
    56 [root@hadoop1 ~]# scp -r /usr/local/hadoop/etc/hadoop/* hadoop2:/usr/local/hadoop/etc/hadoop/
    57 [root@hadoop1 ~]# scp -r /usr/local/hadoop/etc/hadoop/* hadoop3:/usr/local/hadoop/etc/hadoop/
    58 [root@hadoop1 ~]# hdfs namenode -format
    59 [root@hadoop1 ~]# /usr/local/hadoop/sbin/start-dfs.sh
    60 [root@hadoop1 ~]# /usr/local/hadoop/sbin/start-yarn.sh

    5.验证

     1 [root@hadoop1 ~]# jps    # 主机
     2 17641 NodeManager
     3 17946 Jps
     4 14331 DataNode
     5 14204 NameNode
     6 14541 SecondaryNameNode
     7 17519 ResourceManager
     8 
     9 [root@hadoop2 java]# jps    # 从机
    10 14784 NodeManager
    11 13797 DataNode
    12 14918 Jps

    http://ip:50070可查看hdfs信息

  • 相关阅读:
    Centos7 FTP服务安装,Centos FTP安装配置
    Shiro CookieRememberMeManager Invalid AES key length
    Shiro thymeleaf整合使用
    闪存中的键值对:无文件系统 minINI
    SAP UI类标准导出XML格式Excel
    CRM item status error
    被某个自认漂亮国的狗腿子骂了。。。
    New ABAP Debugger Session does not close after Exit
    Java队列使用举例
    布隆过滤器使用举例
  • 原文地址:https://www.cnblogs.com/bfmq/p/7832896.html
Copyright © 2020-2023  润新知