Hadoop cluster setup: configuring Spark on YARN and Hadoop volumes (the pursuit of efficiency never ends)


    【Manually verify: bidirectional passwordless SSH login works between any 2 nodes】

    Understand the communication principles and the cluster's fault tolerance.

    Set up bidirectional passwordless SSH login between any 2 nodes; the keys live under the ~ directory by default.

    【After the step above, install and configure Hadoop on any one node, then scp the whole installation directory to every other node: the file contents on all nodes are identical!!!!】
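
    To verify the full SSH mesh from one node, a minimal sketch (assuming the three hostnames below match your /etc/hosts):

    # Check every ordered pair; BatchMode=yes makes ssh fail instead of prompting for a password.
    for a in bigdata-server-01 bigdata-server-02 bigdata-server-03; do
      for b in bigdata-server-01 bigdata-server-02 bigdata-server-03; do
        ssh -o BatchMode=yes "$a" "ssh -o BatchMode=yes $b true" \
          && echo "$a -> $b OK" || echo "$a -> $b FAILED"
      done
    done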

    [hadoop@bigdata-server-03 ~]$ jps
    9217 SecondaryNameNode
    9730 Jps
    9379 ResourceManager
    9497 NodeManager
    8895 NameNode
    9039 DataNode 
    [hadoop@bigdata-server-01 ~]$ ssh bigdata-server-01
    Last login: Sat Nov 25 23:13:06 2017 from 120.178.18.4 
    [hadoop@bigdata-server-01 ~]$ jps
    19035 Jps
    18670 DataNode
    [hadoop@bigdata-server-01 ~]$ ssh bigdata-server-02
    Last login: Sat Nov 25 23:14:03 2017 from 120.0.0.1 
    [hadoop@bigdata-server-02 ~]$ jps
    19035 Jps
    18670 DataNode
    

      

    【BASE】
    https://stackoverflow.com/questions/26346277/scp-files-from-local-to-remote-machine-error-no-such-file-or-directory
    http://www.tldp.org/LDP/lame/LAME/linux-admin-made-easy/removing-user-accounts.html
    
    
    $ whoami
    user1
    $ su - user2
    Password:
    $ whoami
    user2
    $ exit
    logout
    
     
     
    http://www.binarytides.com/linux-command-shutdown-reboot-restart-system/
    
    [2.9.0]
    http://www.server-world.info/en/note?os=CentOS_7&p=hadoop
    http://www.server-world.info/en/note?os=CentOS_7&p=jdk8
    http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuring_Environment_of_Hadoop_Daemons
    http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml
    
    
    http://www.codecoffee.com/tipsforlinux/articles/22.html
    the directory size
    du ~ -s 

    3-node spark-yarn configuration
    【Passwordless communication module】
    su - hadoop;

    ssh-keygen -t rsa;


    cd /usr/hadoop/.ssh/;


    cat /etc/hosts;

    ssh bigdata-server-01;
    ssh bigdata-server-02;
    ssh bigdata-server-03;
    # passwd hadoop;
    ssh-copy-id bigdata-server-01;
    ssh-copy-id bigdata-server-02;
    ssh-copy-id bigdata-server-03;

    cat /etc/ssh/sshd_config

    Read through the configuration.

    【Install Spark】

    【Install and configure Hadoop】


    [root@bigdata-server-02 ~]# cd /usr/local/hadoop
    [root@bigdata-server-02 hadoop]# ll -as
    total 208
    4 drwxr-xr-x 9 bigdata bigdata 4096 Dec 9 03:42 .
    4 drwxr-xr-x. 18 root root 4096 Dec 19 21:40 ..
    4 drwxr-xr-x 2 bigdata bigdata 4096 Dec 9 03:42 bin
    4 drwxr-xr-x 3 bigdata bigdata 4096 Dec 9 03:17 etc
    4 drwxr-xr-x 2 bigdata bigdata 4096 Dec 9 03:42 include
    4 drwxr-xr-x 3 bigdata bigdata 4096 Dec 9 03:42 lib
    4 drwxr-xr-x 4 bigdata bigdata 4096 Dec 9 03:42 libexec
    144 -rw-r--r-- 1 bigdata bigdata 147066 Nov 15 03:19 LICENSE.txt
    24 -rw-r--r-- 1 bigdata bigdata 20891 Nov 15 03:19 NOTICE.txt
    4 -rw-r--r-- 1 bigdata bigdata 1366 Jul 9 2016 README.txt
    4 drwxr-xr-x 3 bigdata bigdata 4096 Dec 9 03:17 sbin
    4 drwxr-xr-x 4 bigdata bigdata 4096 Dec 9 03:53 share
    [root@bigdata-server-02 hadoop]# pwd
    /usr/local/hadoop
    [root@bigdata-server-02 hadoop]# mkdir mydatanode
    [root@bigdata-server-02 hadoop]# mkdir mynamenode
    [root@bigdata-server-02 hadoop]#


    # add into <configuration> - </configuration> section
    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>2</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///usr/local/hadoop/mydatanode</value>
      </property>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///usr/local/hadoop/mynamenode</value>
      </property>
    </configuration>
    vi etc/hadoop/hdfs-site.xml;

    # add into <configuration> - </configuration> section
    <configuration>
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://bigdata-server-02:8080/</value>
      </property>
    </configuration>
    # Problem binding to [bigdata-server-01:9000] java.net.BindException: Cannot assign requested address;
    # port 9001 was used instead
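
    A "Cannot assign requested address" bind error usually means the hostname in fs.defaultFS resolves to an IP that is not configured on the NameNode machine itself. A quick hedged check (hostname assumed from this setup):

    # Compare what the name resolves to with the locally configured addresses.
    getent hosts bigdata-server-01
    ip addr | grep 'inet '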

    vi etc/hadoop/core-site.xml;


    # create new
    <configuration>
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
    </configuration>

    vi etc/hadoop/mapred-site.xml;

    # add into <configuration> - </configuration> section

    <configuration>
      <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>bigdata-server-02</value>
      </property>
      <property>
        <name>yarn.nodemanager.hostname</name>
        <value>bigdata-server-02</value>
      </property>
      <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
      </property>
    </configuration>


    vi etc/hadoop/yarn-site.xml;


    scp hadoop-3.0.0 root@bigdata-server-01:/usr/local;scp hadoop-3.0.0 root@bigdata-server-03:/usr/local;   # fails: scp needs -r to copy a directory
    【tar it up, scp the tarball, then ssh in to untar and create the symlink】
    tar -cf hadoop-3.0.0.mycom.tar hadoop-3.0.0;
    scp hadoop-3.0.0.mycom.tar root@bigdata-server-01:/usr/local;scp hadoop-3.0.0.mycom.tar root@bigdata-server-03:/usr/local;

    ssh bigdata-server-03 "cd /usr/local;tar -xvf hadoop-3.0.0.mycom.tar;ln -s hadoop-3.0.0 hadoop;";
    ssh bigdata-server-01 "cd /usr/local;tar -xvf hadoop-3.0.0.mycom.tar;ln -s hadoop-3.0.0 hadoop;";

    Hadoop- datanode and node manager not running

    https://stackoverflow.com/questions/32753218/yarn-do-we-need-nodemanager-on-namenode

    Startup mechanics: pid files

    Where state is stored

    Conflicts with stale state

    [root@bigdata-server-02 hadoop]# rm -rf mynamenode/*;rm -rf mydatanode/*;rm -rf /tmp/*hadoop*;rm -rf /tmp/*yarn*; rm -rf /tmp/*pid;
    [root@bigdata-server-02 hadoop]# ssh bigdata-server-01 'cd /usr/local/hadoop;rm -rf mynamenode/*;rm -rf mydatanode/*;rm -rf /tmp/*hadoop*;rm -rf /tmp/*yarn*; rm -rf /tmp/*pid;';
    [root@bigdata-server-02 hadoop]# ssh bigdata-server-03 'cd /usr/local/hadoop;rm -rf mynamenode/*;rm -rf mydatanode/*;rm -rf /tmp/*hadoop*;rm -rf /tmp/*yarn*; rm -rf /tmp/*pid;';
    [root@bigdata-server-02 hadoop]#

    ssh bigdata-server-01 'cd /usr/local/hadoop;rm -rf {mydatanode,mynamenode}/*';

    4 -rwxr-xr-x 1 root root 349 Dec 25 23:12 root_rm_logs_mydn-nn_roottmp.sh
    4 -rwxr-xr-x 1 root root 349 Dec 25 23:14 root_rm_mydn-nn_roottmp.sh
    4 drwxr-xr-x 2 20415 101 4096 Dec 16 09:12 sbin
    4 drwxr-xr-x 4 20415 101 4096 Dec 16 09:12 share
    [root@bigdata-server-02 hadoop]# cat root_rm_logs_mydn-nn_roottmp.sh
    ssh bigdata-server-01 'cd /usr/local/hadoop;rm -rf {mydatanode,mynamenode}/*;rm -rf /tmp/*;rm -rf logs/*';
    ssh bigdata-server-02 'cd /usr/local/hadoop;rm -rf {mydatanode,mynamenode}/*;rm -rf /tmp/*;rm -rf logs/*';
    ssh bigdata-server-03 'cd /usr/local/hadoop;rm -rf {mydatanode,mynamenode}/*;rm -rf /tmp/*;rm -rf logs/*';

    A further improvement: use a for loop to tighten the code, as in the sketch below.
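
    A minimal sketch of the loop version, with the hostnames and paths used above:

    for h in bigdata-server-01 bigdata-server-02 bigdata-server-03; do
      ssh "$h" 'cd /usr/local/hadoop; rm -rf {mydatanode,mynamenode}/* logs/* /tmp/*'
    done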

    Change the password

    passwd

    192.168.3.102
    192.168.3.103
    root 123

    192.168.2.40
    root 123

    2.40-->3.101 ifconfig enp2s0 192.168.3.101 netmask 255.255.254.0

    【Analyzing ssh-keygen】

    [root@hadoop3 ~]# rm -rf /root/.ssh
    [root@hadoop3 ~]# ssh-keygen -t rsa;
    Generating public/private rsa key pair.
    Enter file in which to save the key (/root/.ssh/id_rsa): 
    Created directory '/root/.ssh'.
    Enter passphrase (empty for no passphrase): 
    Enter same passphrase again: 
    Your identification has been saved in /root/.ssh/id_rsa.
    Your public key has been saved in /root/.ssh/id_rsa.pub.
    The key fingerprint is:
    SHA256:CCj5BZDUWU5pNK0kvlJz5VYnbjLYuFwnsMvqWrlgA/Y root@hadoop3
    The key's randomart image is:
    +---[RSA 2048]----+
    |o+o +=o          |
    |.. *o=.o o .     |
    |o o *.X o o      |
    | o = B X +       |
    |..o * * S        |
    |o....=           |
    | +.E.            |
    |. +..            |
    | .oo             |
    +----[SHA256]-----+
    [root@hadoop3 ~]# ll -as /root/.ssh/
    total 12
    0 drwx------   2 root root   38 Jul 12 11:47 .
    4 dr-xr-x---. 28 root root 4096 Jul 12 11:47 ..
    4 -rw-------   1 root root 1679 Jul 12 11:47 id_rsa
    4 -rw-r--r--   1 root root  394 Jul 12 11:47 id_rsa.pub
    [root@hadoop3 ~]# 
    

      

    Two files were generated:

    [root@hadoop3 ~]# cat /root/.ssh/id_rsa
    -----BEGIN RSA PRIVATE KEY-----
    MIIEpAIBAAKCAQEAvKqnSF+sxETyn+xHeF1KUZygmkcWU5eDTAkbSPOjRa8CGK6G
    g5UkayNVdyf/hiHc+PWG5DzLfmvkU4CdylL792U80+lhpFJSZ3spd4lgh8c20mly
    AgzJ5pl/kYaAz5VkF7uMJWX61g46NDWSCO2ruZLuWEMkytTh2RR9Pjjykp80e5mD
    HzGByubpL3uo1iHtfq7cHlMlsiXBf94xdinICJum0SVg9usLrj1X1ASzCZ9dgG3h
    ICuBFli7d/POvu8NCOIUmA2tPfgmeb0RXHJGDGSldpeg08+zMd6Emngn8zmu9Nxv
    8EvmCPZTOxuQ9dr40bumHeMhufZUEYF6LsaZJQIDAQABAoIBAQCFsYSTQ8Eg4B7y
    drP6tlkY1h301ZUbrU1MT1O3cXbsxWR96wbFLaW+Ci7hHkXzXgHBpfNtvysQrhIB
    ni2ylvWYTXQ6UrJviCp+zAcJfx8ZeHD/z9sLWtakA3gjvqV+9EUWkD9yrP6AO1rB
    bojKrOk4uscNYp8q4Ioek2dg9Wfnv5Qaj6Mk7ASOEB0zviSIv/hgYcKZ75oZTv1E
    L/j2tj4Xzyzh0N9wez6ZmnTvT2TdAWZypnLSJIWDT7lskaWv3nH0CAhFG5boPriF
    FqpVA4aR0Q9TNbfIQLtv/zHRUUDhsJNn4q4bH+snDWTl9R+yXX2bljK5IM2NaIQH
    yOQ0+3xhAoGBAODcnJrTDFDVlXUaSKD9KiopJBD63b6reiMaCd/7/hKoxytTGc/o
    x9AK3+5vkjizbB/dbGNsyrXBOTrHaK2Jd1EY3CSTm/r4KNBox+4POZZdboAXwP3u
    HiBkvmosc4tYLZ7v63xjedQGFQLkb7wc0XZOClZ1pxCFkN2UKI1ATDRpAoGBANbK
    6tyNkhoevW2lKJPiZvgJNixW/h1Tfu1Bu6kdWMdZkm3qWoTWffMYn6xFDWsrhDQ3
    QCXFBXTIoMIacl3i3TZb9JriL34jOjNHWkVi6z1ghcO2Oc3M2LfvILMY+Qd2mMgm
    6HC4dj5nDEQ4biOOZgvPhC9ocraNdZbx37EmjTddAoGAIuhcr3RgDxR5NUq1R7jF
    mPH2FWS8k+MO/PAH5Gu8T60/7ivib/JVQqjNhrhvXLoN6Qx4zR6QgZLTjZpzV61l
    hoNzeYIoztdDjscVcpGOgRdUFjKZ1VHn/2NkZBsufM1dl7TrO849lXq0PFS2O9/F
    bLZEyJNPMjNp/9wGR5dZvTECgYEAh+y3fcT1PSRQ2c8Xg6ZVZQdnSd3vR52sB/Z+
    DEIvCVBssrQIfmHCKJFfkkPMfxJ1whlotb4des7vtIXJ9BH5zUmZ3F3gkiE21naD
    8L7tgNTRMY3ivJKyXousFMpr5UYu3xKIK7T/1vOdNprDUCrv9u9mhh3B4jZYwKHl
    3hQ4b10CgYADSt/7N9GEWwVGljcxqBLH/NVZBKMW4gv5pLL+IY5Rqzk97a3Zc0TE
    46HDQORgPTmRquBHOO46sJaoF/lH/E2yK+7ggLsWpQg36L/QCAZj/4JH24M3W+sH
    tz2MlgKckSAxjlwTtP7+dom3uTIo6sih+sRrIWHwzI0CmPmPe/QXjA==
    -----END RSA PRIVATE KEY-----
    [root@hadoop3 ~]# cat /root/.ssh/id_rsa.pub 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8qqdIX6zERPKf7Ed4XUpRnKCaRxZTl4NMCRtI86NFrwIYroaDlSRrI1V3J/+GIdz49YbkPMt+a+RTgJ3KUvv3ZTzT6WGkUlJneyl3iWCHxzbSaXICDMnmmX+RhoDPlWQXu4wlZfrWDjo0NZII7au5ku5YQyTK1OHZFH0+OPKSnzR7mYMfMYHK5ukve6jWIe1+rtweUyWyJcF/3jF2KcgIm6bRJWD26wuuPVfUBLMJn12AbeEgK4EWWLt3886+7w0I4hSYDa09+CZ5vRFcckYMZKV2l6DTz7Mx3oSaeCfzOa703G/wS+YI9lM7G5D12vjRu6Yd4yG59lQRgXouxpkl root@hadoop3
    [root@hadoop3 ~]# 
    

      

    [root@hadoop1 ~]# cat /root/.ssh/id_rsa.pub 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/mSwq7gdDusAiW6gaA8ZAlCWOb9aCv4Bz/2L5JjpWwFPkZUQNpOtnfGRxi4X24Wrk4kq4Boj/mQ3U3sXIpeNz+ZyNVe3OPE3qKVDB2jve5pzeeM7qtWh4Ock30NzpznZFLeY9a+Ic8vDeXPPAEkgibgutibEqWyXuomSCIZGpPlh+HveY6Dtc/oRaoTEfxyLJS0FqyvzRdynCHgwiavbKFyzfL5IQVBRAqBYXLVdC6e4+gSUs1v9DAe/uEOtf5aw9AdScePSUY5AJ1fe2DTucXzci3zCIJoQ8bBedKFf0iIzWZZMLnlqLPG49E4tJCnI4qyyP6nyHv/mN6AEWk95t root@hadoop1
    [root@hadoop1 ~]# cat /root/.ssh/id_rsa
    -----BEGIN RSA PRIVATE KEY-----
    MIIEpAIBAAKCAQEAv5ksKu4HQ7rAIluoGgPGQJQljm/Wgr+Ac/9i+SY6VsBT5GVE
    DaTrZ3xkcYuF9uFq5OJKuAaI/5kN1N7FyKXjc/mcjVXtzjxN6ilQwdo73uac3njO
    6rVoeDnJN9Dc6c52RS3mPWviHPLw3lzzwBJIIm4LrYmxKlsl7qJkgiGRqT5Yfh73
    mOg7XP6EWqExH8ciyUtBasr80Xcpwh4MImr2yhcs3y+SEFQUQKgWFy1XQunuPoEl
    LNb/QwHv7hDrX+WsPQHUnHj0lGOQCdX3tg07nF83It8wiCaEPGwXnShX9IiM1mWT
    C55aizxuPROLSQpyOKssj+p8h7/5jegBFpPebQIDAQABAoIBAHhJuQIGyIbMIz4u
    3x3eCsSWffGr4zfY9NNenguf5XZ7bu/wZ8ZNKQGihgkHOIbjxNGIBLL+X1phA98G
    MZQkGeXA63mMXi1hjOUbJTlfQsFRdWDy5a1TURBR7zNcrKUZWwVZqLgdGCtmlrR0
    FRAcKi97eVdtH85gxTLJv2I3oxRmGFft2yKmVrb9+uLCx2PE6ccpOOATClfHr3K3
    5zOXon2eFVaepu4Si2JJypkzrHtH+qiZodEnQqN/UOVMAhJyyjg04bNxWEMUXF7U
    6nvKuA7Yz1pAVqdb1JHhdOv/4e6zkPc6EyLu7api7g6fPlV2GVHocvYaMbx210TB
    msAkuP0CgYEA/XBYMSv6yTlKCnaaDLarowWOr5RMBr8tf8Lcg4a6nh9uKLAQeAlN
    xee7kH0m6UCkfa0qp4eYhfraghx+XjEZ0igfbwgawLoPFfE371jHDeGRquDH6tu3
    +25mc988O/cnJPwj7/QyXbub36moJ8tHgvq/oI5AI3UNZ7JWBQ8ESn8CgYEAwYjY
    KJ9gwQf1DDmkr5wU4CXGaZlY/7KlAZ4ZTz7SYsaRbe1UyXlnsc/JNQQDcfdeHCOh
    rHKsBiWqk/LnXbm9BVxysH08E8hVFxE2IzBTbQ169qCafIJQD1rpGEND2EEqO2E1
    iIFZZ70Wo7usXTvebjMdNf3WhkCa9y12ssSsKRMCgYEAhLobVdUkh9Ga9xPZ5aKd
    DMlSSp2tmzLwDyLr/W3HuhvXwzNBzLuCoYyU7Dy+7hVOkArqdcZLmI8hdFabz5SD
    Y05j9/AUoq5OTD2B/7VMufZSJV2HFXZwShstSK22i+kJ9RKfd4E6B2DDZ0UgrYaG
    MxBC30DgUjFxDceVyRxuMN0CgYBNqNXkZx/yFXlVcIQPG7icwUu+8BPwdwUTgxdw
    3yqZDEkrLUMKnbboeRKqPXQMdVDEReAITPOOe+rY9220BGY/EnvLKlXDMm5ClVt9
    /1RavEANWyDiuX/ayYYjgEpnKq8BqN5Mamsv34aIKTTfLLjyy3v7QGKm+KG2cf3h
    el4DFQKBgQCJVsTeTDLKNGb3AhT840mt87HI5oVoFNt0GLZ16FBTV7+/qs/2ecdV
    oafb7L4Njsx/0zNQHYc7ql86O/AD/JJP8JBPGMfGOJP0IfKh7F9vxLjAshERD6my
    xllnVB+M2BzNacO4G0rAjWkpdNh2OhQiRPoQbfJ94/QyyKqdR/eqpw==
    -----END RSA PRIVATE KEY-----
    [root@hadoop1 ~]# 
    

      

    [root@hadoop2 ~]# cat /root/.ssh/id_rsa
    -----BEGIN RSA PRIVATE KEY-----
    MIIEpAIBAAKCAQEAyREegcNk148FwkcqpJKlZxiM137ZF40zcu6NL0vtCQw5e9p4
    gjkmyxgoc1g8mvzKGdUiIJ2K5AqDc567hEKptP+AN644UsOW+Leq7s096+17tV/a
    cOnYwDjJkwHZX1l9o87C59uXD0aydf60Z9lNzXJvsedkThSuJfN5r0qq3cTz32vY
    CAajPmEwu4p7l3PNAB5kvOTICoesL+Bxssy2JYB5FWCL+ZKaTwiP6LLnsZ54fhej
    j4DZpkuvPj6BUFcLP8jMRwird7jx5lYbd+X5vwVwTbiG0OTE1rZbm9nBdeWAZCRH
    M7kvECajwdtTQAeMqv/vN9pCUGV80dJlD8jPPwIDAQABAoIBADVcjr0fjzbKJVwf
    KQkORklrMY3Lg3AFsF7TQrMHsnvRO7xMCel9o3cJDUs3YrY7WqOqdek0BnVo+OQJ
    f3ilfIalvHCKkzYb5IhTrlC8Na/Ukh1buAx5c2XobE7QkdEFXhvINt/z3k5Wk+xO
    0bAx8r0QnuYXI/647FL0IBpOdbRvJT6a0vgHaJY/XWKLcnZXpN6amp9yepB31An2
    AdEDM26+/9YL9YNJ0lE071SvI7LE/ew3pHjCYb41vni4zDysS94tAPDqBXijWpni
    BGklooQuU8FIp3qPipeO9LqJtqxhTGNDrlxMgN5dUjR0LkbwbqZfcwZmFD+ZLMx+
    V/amGCECgYEA7eO/o1EvSwzEzFP70qiG335HXJY5Op1W4CX7LPeO7DnW+0rSb+45
    0onwVQKDkmw/O0GiwyZ6Bnn6Zpoh/cWTCU0tTMWc1L9ugZlYjb6luW7XECYcc2r7
    J0v0/F76lJAiyezGvxNZueHtpgtFO2b2YSc5ALGsPn/7SBevJK3mCXECgYEA2F+6
    fTwY9yyQKp7qkADOlvVYI8bso0UIV8tmXkhVUTMdRhSuSnLE7GdsB5iJnHOItLfo
    VWb33QpiUVzAduvbLm9ZYWwJJSU5twFXtxhY0ktt6qW940eqZZwoy48kXT8h09nY
    pLILZcWsgjpP0ONQwp5QLS7tMxSGsfyp3Froi68CgYEAljT5G0E20HeWh8H7vs8E
    onfUzWRZbGZOpae1ynXh+8ylrvRWnbBZOFQ6uSKmOz04S80s3XYdFJvOfRyTm+41
    4mil0tTwKvFY8GIIJTAc6lJPX3YA/uus++odHYFHTakZHlDwSVQJkrJSYUa6h0CD
    D2M5vfNx4+DhpGq3/zwChTECgYB9pbFc0g5JUqZxKZFaiC1veg9xzy1Rbl/245WR
    gH2SxpTkQlQnxVfXVANmscyPfoPPNdCD72RWBparWqolJLdF0sFbkmoJGQHX5L60
    Az5o+AZfMVoAZnhrwu/prTjXsTaKmEF2+jEmK1EO2p/I1IfsTBSQ+GQjunKxXuCg
    pmXN3wKBgQCKLyvQT5y7G6YcbGtKQhj2LEgEwiUjurZFWVBSdvvIECfs2JO7uo1t
    PmqIQ43RfnOU5YAblnA3hJeAPKz/hlKw5NBwnOMrGFmOgb2xwyq+xlP83/g/C9dK
    PUjJn0D6MKOmfAcJJS3M2UbwwH6IF6j2xUBP33F2c5EVeMx/KGiYyA==
    -----END RSA PRIVATE KEY-----
    [root@hadoop2 ~]# cat /root/.ssh/id_rsa.pub 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJER6Bw2TXjwXCRyqkkqVnGIzXftkXjTNy7o0vS+0JDDl72niCOSbLGChzWDya/MoZ1SIgnYrkCoNznruEQqm0/4A3rjhSw5b4t6ruzT3r7Xu1X9pw6djAOMmTAdlfWX2jzsLn25cPRrJ1/rRn2U3Ncm+x52ROFK4l83mvSqrdxPPfa9gIBqM+YTC7inuXc80AHmS85MgKh6wv4HGyzLYlgHkVYIv5kppPCI/osuexnnh+F6OPgNmmS68+PoFQVws/yMxHCKt3uPHmVht35fm/BXBNuIbQ5MTWtlub2cF15YBkJEczuS8QJqPB21NAB4yq/+832kJQZXzR0mUPyM8/ root@hadoop2
    [root@hadoop2 ~]# 
    

      

    hadoop1 can now log in to hadoop2 without a password, but hadoop2 cannot yet log in to hadoop1 without one.

    [root@hadoop1 ~]# ssh-copy-id hadoop2;
    /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
    The authenticity of host 'hadoop2 (192.168.3.102)' can't be established.
    ECDSA key fingerprint is SHA256:UqQuyu+TPvuGuwdiDAkmKSrjPfjqMFBas1OyTT6aRQg.
    ECDSA key fingerprint is MD5:ed:b8:30:e2:0f:e5:0c:0f:bb:7c:86:2c:9f:72:e3:d0.
    Are you sure you want to continue connecting (yes/no)? yes
    /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
    /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
    root@hadoop2's password: 
    
    Number of key(s) added: 1
    
    Now try logging into the machine, with:   "ssh 'hadoop2'"
    and check to make sure that only the key(s) you wanted were added.
    
    [root@hadoop1 ~]# ssh hadoop2
    Last login: Thu Jul 12 11:11:20 2018 from 192.168.3.99
    [root@hadoop2 ~]# ssh hadoop1
    The authenticity of host 'hadoop1 (192.168.3.101)' can't be established.
    ECDSA key fingerprint is e0:19:b2:4b:1b:d1:4e:d4:21:73:9b:44:a7:2b:d7:8c.
    Are you sure you want to continue connecting (yes/no)? ^C
    [root@hadoop2 ~]#
    

     

    Changes to hadoop1's files:

    [root@hadoop1 ~]# ll -as /root/.ssh/
    total 16
    0 drwx------   2 root root   57 Jul 12 11:54 .
    4 dr-xr-x---. 16 root root 4096 Jul 12 11:48 ..
    4 -rw-------   1 root root 1679 Jul 12 11:48 id_rsa
    4 -rw-r--r--   1 root root  394 Jul 12 11:48 id_rsa.pub
    4 -rw-r--r--   1 root root  183 Jul 12 11:54 known_hosts
    [root@hadoop1 ~]# cat /root/.ssh/id_rsa
    -----BEGIN RSA PRIVATE KEY-----
    MIIEpAIBAAKCAQEAv5ksKu4HQ7rAIluoGgPGQJQljm/Wgr+Ac/9i+SY6VsBT5GVE
    DaTrZ3xkcYuF9uFq5OJKuAaI/5kN1N7FyKXjc/mcjVXtzjxN6ilQwdo73uac3njO
    6rVoeDnJN9Dc6c52RS3mPWviHPLw3lzzwBJIIm4LrYmxKlsl7qJkgiGRqT5Yfh73
    mOg7XP6EWqExH8ciyUtBasr80Xcpwh4MImr2yhcs3y+SEFQUQKgWFy1XQunuPoEl
    LNb/QwHv7hDrX+WsPQHUnHj0lGOQCdX3tg07nF83It8wiCaEPGwXnShX9IiM1mWT
    C55aizxuPROLSQpyOKssj+p8h7/5jegBFpPebQIDAQABAoIBAHhJuQIGyIbMIz4u
    3x3eCsSWffGr4zfY9NNenguf5XZ7bu/wZ8ZNKQGihgkHOIbjxNGIBLL+X1phA98G
    MZQkGeXA63mMXi1hjOUbJTlfQsFRdWDy5a1TURBR7zNcrKUZWwVZqLgdGCtmlrR0
    FRAcKi97eVdtH85gxTLJv2I3oxRmGFft2yKmVrb9+uLCx2PE6ccpOOATClfHr3K3
    5zOXon2eFVaepu4Si2JJypkzrHtH+qiZodEnQqN/UOVMAhJyyjg04bNxWEMUXF7U
    6nvKuA7Yz1pAVqdb1JHhdOv/4e6zkPc6EyLu7api7g6fPlV2GVHocvYaMbx210TB
    msAkuP0CgYEA/XBYMSv6yTlKCnaaDLarowWOr5RMBr8tf8Lcg4a6nh9uKLAQeAlN
    xee7kH0m6UCkfa0qp4eYhfraghx+XjEZ0igfbwgawLoPFfE371jHDeGRquDH6tu3
    +25mc988O/cnJPwj7/QyXbub36moJ8tHgvq/oI5AI3UNZ7JWBQ8ESn8CgYEAwYjY
    KJ9gwQf1DDmkr5wU4CXGaZlY/7KlAZ4ZTz7SYsaRbe1UyXlnsc/JNQQDcfdeHCOh
    rHKsBiWqk/LnXbm9BVxysH08E8hVFxE2IzBTbQ169qCafIJQD1rpGEND2EEqO2E1
    iIFZZ70Wo7usXTvebjMdNf3WhkCa9y12ssSsKRMCgYEAhLobVdUkh9Ga9xPZ5aKd
    DMlSSp2tmzLwDyLr/W3HuhvXwzNBzLuCoYyU7Dy+7hVOkArqdcZLmI8hdFabz5SD
    Y05j9/AUoq5OTD2B/7VMufZSJV2HFXZwShstSK22i+kJ9RKfd4E6B2DDZ0UgrYaG
    MxBC30DgUjFxDceVyRxuMN0CgYBNqNXkZx/yFXlVcIQPG7icwUu+8BPwdwUTgxdw
    3yqZDEkrLUMKnbboeRKqPXQMdVDEReAITPOOe+rY9220BGY/EnvLKlXDMm5ClVt9
    /1RavEANWyDiuX/ayYYjgEpnKq8BqN5Mamsv34aIKTTfLLjyy3v7QGKm+KG2cf3h
    el4DFQKBgQCJVsTeTDLKNGb3AhT840mt87HI5oVoFNt0GLZ16FBTV7+/qs/2ecdV
    oafb7L4Njsx/0zNQHYc7ql86O/AD/JJP8JBPGMfGOJP0IfKh7F9vxLjAshERD6my
    xllnVB+M2BzNacO4G0rAjWkpdNh2OhQiRPoQbfJ94/QyyKqdR/eqpw==
    -----END RSA PRIVATE KEY-----
    [root@hadoop1 ~]# cat /root/.ssh/id_rsa.pub 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/mSwq7gdDusAiW6gaA8ZAlCWOb9aCv4Bz/2L5JjpWwFPkZUQNpOtnfGRxi4X24Wrk4kq4Boj/mQ3U3sXIpeNz+ZyNVe3OPE3qKVDB2jve5pzeeM7qtWh4Ock30NzpznZFLeY9a+Ic8vDeXPPAEkgibgutibEqWyXuomSCIZGpPlh+HveY6Dtc/oRaoTEfxyLJS0FqyvzRdynCHgwiavbKFyzfL5IQVBRAqBYXLVdC6e4+gSUs1v9DAe/uEOtf5aw9AdScePSUY5AJ1fe2DTucXzci3zCIJoQ8bBedKFf0iIzWZZMLnlqLPG49E4tJCnI4qyyP6nyHv/mN6AEWk95t root@hadoop1
    [root@hadoop1 ~]# cat /root/.ssh/known_hosts 
    hadoop2,192.168.3.102 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKwk4ldwjl9bHfulRh/Go9dRfR70PK+XYiFAgE8JuCgBzLjfShC3JQpZNq1uDcXTPSwwWGWxfTe5lWLzKnA6jXc=
    [root@hadoop1 ~]# 
    

      

    Changes to hadoop2's files:

    [root@hadoop2 ~]# ssh hadoop1
    The authenticity of host 'hadoop1 (192.168.3.101)' can't be established.
    ECDSA key fingerprint is e0:19:b2:4b:1b:d1:4e:d4:21:73:9b:44:a7:2b:d7:8c.
    Are you sure you want to continue connecting (yes/no)? ^C
    [root@hadoop2 ~]# ll -as /root/.ssh/
    total 16
    0 drwx------   2 root root   61 Jul 12 11:52 .
    4 dr-xr-x---. 25 root root 4096 Jul 12 11:47 ..
    4 -rw-------   1 root root  394 Jul 12 11:52 authorized_keys
    4 -rw-------   1 root root 1679 Jul 12 11:47 id_rsa
    4 -rw-r--r--   1 root root  394 Jul 12 11:47 id_rsa.pub
    [root@hadoop2 ~]# cat /root/.ssh/id_rsa
    -----BEGIN RSA PRIVATE KEY-----
    MIIEpAIBAAKCAQEAyREegcNk148FwkcqpJKlZxiM137ZF40zcu6NL0vtCQw5e9p4
    gjkmyxgoc1g8mvzKGdUiIJ2K5AqDc567hEKptP+AN644UsOW+Leq7s096+17tV/a
    cOnYwDjJkwHZX1l9o87C59uXD0aydf60Z9lNzXJvsedkThSuJfN5r0qq3cTz32vY
    CAajPmEwu4p7l3PNAB5kvOTICoesL+Bxssy2JYB5FWCL+ZKaTwiP6LLnsZ54fhej
    j4DZpkuvPj6BUFcLP8jMRwird7jx5lYbd+X5vwVwTbiG0OTE1rZbm9nBdeWAZCRH
    M7kvECajwdtTQAeMqv/vN9pCUGV80dJlD8jPPwIDAQABAoIBADVcjr0fjzbKJVwf
    KQkORklrMY3Lg3AFsF7TQrMHsnvRO7xMCel9o3cJDUs3YrY7WqOqdek0BnVo+OQJ
    f3ilfIalvHCKkzYb5IhTrlC8Na/Ukh1buAx5c2XobE7QkdEFXhvINt/z3k5Wk+xO
    0bAx8r0QnuYXI/647FL0IBpOdbRvJT6a0vgHaJY/XWKLcnZXpN6amp9yepB31An2
    AdEDM26+/9YL9YNJ0lE071SvI7LE/ew3pHjCYb41vni4zDysS94tAPDqBXijWpni
    BGklooQuU8FIp3qPipeO9LqJtqxhTGNDrlxMgN5dUjR0LkbwbqZfcwZmFD+ZLMx+
    V/amGCECgYEA7eO/o1EvSwzEzFP70qiG335HXJY5Op1W4CX7LPeO7DnW+0rSb+45
    0onwVQKDkmw/O0GiwyZ6Bnn6Zpoh/cWTCU0tTMWc1L9ugZlYjb6luW7XECYcc2r7
    J0v0/F76lJAiyezGvxNZueHtpgtFO2b2YSc5ALGsPn/7SBevJK3mCXECgYEA2F+6
    fTwY9yyQKp7qkADOlvVYI8bso0UIV8tmXkhVUTMdRhSuSnLE7GdsB5iJnHOItLfo
    VWb33QpiUVzAduvbLm9ZYWwJJSU5twFXtxhY0ktt6qW940eqZZwoy48kXT8h09nY
    pLILZcWsgjpP0ONQwp5QLS7tMxSGsfyp3Froi68CgYEAljT5G0E20HeWh8H7vs8E
    onfUzWRZbGZOpae1ynXh+8ylrvRWnbBZOFQ6uSKmOz04S80s3XYdFJvOfRyTm+41
    4mil0tTwKvFY8GIIJTAc6lJPX3YA/uus++odHYFHTakZHlDwSVQJkrJSYUa6h0CD
    D2M5vfNx4+DhpGq3/zwChTECgYB9pbFc0g5JUqZxKZFaiC1veg9xzy1Rbl/245WR
    gH2SxpTkQlQnxVfXVANmscyPfoPPNdCD72RWBparWqolJLdF0sFbkmoJGQHX5L60
    Az5o+AZfMVoAZnhrwu/prTjXsTaKmEF2+jEmK1EO2p/I1IfsTBSQ+GQjunKxXuCg
    pmXN3wKBgQCKLyvQT5y7G6YcbGtKQhj2LEgEwiUjurZFWVBSdvvIECfs2JO7uo1t
    PmqIQ43RfnOU5YAblnA3hJeAPKz/hlKw5NBwnOMrGFmOgb2xwyq+xlP83/g/C9dK
    PUjJn0D6MKOmfAcJJS3M2UbwwH6IF6j2xUBP33F2c5EVeMx/KGiYyA==
    -----END RSA PRIVATE KEY-----
    [root@hadoop2 ~]# cat /root/.ssh/id_rsa.pub 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJER6Bw2TXjwXCRyqkkqVnGIzXftkXjTNy7o0vS+0JDDl72niCOSbLGChzWDya/MoZ1SIgnYrkCoNznruEQqm0/4A3rjhSw5b4t6ruzT3r7Xu1X9pw6djAOMmTAdlfWX2jzsLn25cPRrJ1/rRn2U3Ncm+x52ROFK4l83mvSqrdxPPfa9gIBqM+YTC7inuXc80AHmS85MgKh6wv4HGyzLYlgHkVYIv5kppPCI/osuexnnh+F6OPgNmmS68+PoFQVws/yMxHCKt3uPHmVht35fm/BXBNuIbQ5MTWtlub2cF15YBkJEczuS8QJqPB21NAB4yq/+832kJQZXzR0mUPyM8/ root@hadoop2
    [root@hadoop2 ~]# cat /root/.ssh/authorized_keys 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/mSwq7gdDusAiW6gaA8ZAlCWOb9aCv4Bz/2L5JjpWwFPkZUQNpOtnfGRxi4X24Wrk4kq4Boj/mQ3U3sXIpeNz+ZyNVe3OPE3qKVDB2jve5pzeeM7qtWh4Ock30NzpznZFLeY9a+Ic8vDeXPPAEkgibgutibEqWyXuomSCIZGpPlh+HveY6Dtc/oRaoTEfxyLJS0FqyvzRdynCHgwiavbKFyzfL5IQVBRAqBYXLVdC6e4+gSUs1v9DAe/uEOtf5aw9AdScePSUY5AJ1fe2DTucXzci3zCIJoQ8bBedKFf0iIzWZZMLnlqLPG49E4tJCnI4qyyP6nyHv/mN6AEWk95t root@hadoop1
    [root@hadoop2 ~]# 
    

     

    hadoop2 gained a new file, authorized_keys, while hadoop1 gained a new file, known_hosts.

    hadoop2's authorized_keys now contains hadoop1's /root/.ssh/id_rsa.pub.

    Run hadoopM ssh-copy-id hadoopN for all A(n,2) = n(n-1) ordered pairs,

    then run hadoopM ssh-copy-id hadoopM on each node itself,

    for A(n,2) + n = n² runs in total.

    Afterwards, every node's

    .ssh/authorized_keys
    .ssh/known_hosts
    [root@hadoop2 ~]# cat /root/.ssh/authorized_keys 
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/mSwq7gdDusAiW6gaA8ZAlCWOb9aCv4Bz/2L5JjpWwFPkZUQNpOtnfGRxi4X24Wrk4kq4Boj/mQ3U3sXIpeNz+ZyNVe3OPE3qKVDB2jve5pzeeM7qtWh4Ock30NzpznZFLeY9a+Ic8vDeXPPAEkgibgutibEqWyXuomSCIZGpPlh+HveY6Dtc/oRaoTEfxyLJS0FqyvzRdynCHgwiavbKFyzfL5IQVBRAqBYXLVdC6e4+gSUs1v9DAe/uEOtf5aw9AdScePSUY5AJ1fe2DTucXzci3zCIJoQ8bBedKFf0iIzWZZMLnlqLPG49E4tJCnI4qyyP6nyHv/mN6AEWk95t root@hadoop1
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8qqdIX6zERPKf7Ed4XUpRnKCaRxZTl4NMCRtI86NFrwIYroaDlSRrI1V3J/+GIdz49YbkPMt+a+RTgJ3KUvv3ZTzT6WGkUlJneyl3iWCHxzbSaXICDMnmmX+RhoDPlWQXu4wlZfrWDjo0NZII7au5ku5YQyTK1OHZFH0+OPKSnzR7mYMfMYHK5ukve6jWIe1+rtweUyWyJcF/3jF2KcgIm6bRJWD26wuuPVfUBLMJn12AbeEgK4EWWLt3886+7w0I4hSYDa09+CZ5vRFcckYMZKV2l6DTz7Mx3oSaeCfzOa703G/wS+YI9lM7G5D12vjRu6Yd4yG59lQRgXouxpkl root@hadoop3
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJER6Bw2TXjwXCRyqkkqVnGIzXftkXjTNy7o0vS+0JDDl72niCOSbLGChzWDya/MoZ1SIgnYrkCoNznruEQqm0/4A3rjhSw5b4t6ruzT3r7Xu1X9pw6djAOMmTAdlfWX2jzsLn25cPRrJ1/rRn2U3Ncm+x52ROFK4l83mvSqrdxPPfa9gIBqM+YTC7inuXc80AHmS85MgKh6wv4HGyzLYlgHkVYIv5kppPCI/osuexnnh+F6OPgNmmS68+PoFQVws/yMxHCKt3uPHmVht35fm/BXBNuIbQ5MTWtlub2cF15YBkJEczuS8QJqPB21NAB4yq/+832kJQZXzR0mUPyM8/ root@hadoop2
    [root@hadoop2 ~]# cat /root/.ssh/known_hosts 
    hadoop1,192.168.3.101 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDVVW5cw8LXqZq6aJJ9tw4idIUa1qq79AQpRrRB3zsKKtN3I9jJPYfL5IU0KRh1OO4oSvU/RV/B+KEkLayC86dI=
    hadoop3,192.168.3.103 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKAfre/2chabtppEpNdzgtyA4M62VXCR6sNfU6z4+MWe0dx+m2tSo67F7JrPNJ/NQfXO3TbQxXRawOEu9AbjhHg=
    hadoop2,192.168.3.102 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBKwk4ldwjl9bHfulRh/Go9dRfR70PK+XYiFAgE8JuCgBzLjfShC3JQpZNq1uDcXTPSwwWGWxfTe5lWLzKnA6jXc=
    [root@hadoop2 ~]# 
    

      The contents are identical on every node.
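
    Those n² ssh-copy-id runs can themselves be scripted. A sketch, assuming every node has already run ssh-keygen and that you type the password at each prompt (-t keeps the prompt attached to your terminal):

    HOSTS="hadoop1 hadoop2 hadoop3"
    for m in $HOSTS; do
      for n in $HOSTS; do
        ssh -t "$m" "ssh-copy-id $n"   # includes m == n, hence A(n,2) + n = n² runs
      done
    done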

    For configuration details, follow the official documentation; make sure the JAVA_HOME environment variable is correct.

    Apache Hadoop 2.9.1 – Hadoop Cluster Setup: http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html
    Specify the node's JAVA_HOME in etc/hadoop/hadoop-env.sh:
    At the very least, you must specify the JAVA_HOME so that it is correctly defined on each remote node.
    
    java -verbose    # locate the JDK installation directory
    [Loaded java.lang.Shutdown$Lock from /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.171-8.b10.el7_5.x86_64/jre/lib/rt.jar]
    
    Write it into the environment variables:
    export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.102-4.b14.el7.x86_64
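
    An alternative way to locate the JDK, a sketch that works when the java on PATH is a symlink chain into the JVM directory:

    # Resolve the real java binary; JAVA_HOME is the path above /jre/bin/java (or /bin/java).
    readlink -f "$(which java)"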
     
     
    etc/hadoop/core-site.xml
    Parameter           | Value        | Notes
    fs.defaultFS        | NameNode URI | hdfs://host:port/
    io.file.buffer.size | 131072       | Size of read/write buffer used in SequenceFiles.
    
    
    <configuration>
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop1:9001/</value>
      </property>
      <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
      </property>
    </configuration>
    
    
    etc/hadoop/hdfs-site.xml
    Configurations for NameNode:
    Parameter                     | Value | Notes
    dfs.namenode.name.dir         | Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently. | If this is a comma-delimited list of directories then the name table is replicated in all of the directories, for redundancy.
    dfs.hosts / dfs.hosts.exclude | List of permitted/excluded DataNodes. | If necessary, use these files to control the list of allowable datanodes.
    dfs.blocksize                 | 268435456 | HDFS blocksize of 256MB for large file-systems.
    dfs.namenode.handler.count    | 100 | More NameNode server threads to handle RPCs from large number of DataNodes.
    Configurations for DataNode:
    Parameter                     | Value | Notes
    dfs.datanode.data.dir         | Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks. | If this is a comma-delimited list of directories, then data will be stored in all named directories, typically on different devices.
    
    
    Create the directories:
    mkdir -p /home/hadoop-2.9.1/mydata/namenode;mkdir -p /home/hadoop-2.9.1/mydata/datanode;
    
    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>2</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///home/hadoop-2.9.1/mydata/datanode</value>
      </property>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///home/hadoop-2.9.1/mydata/namenode</value>
      </property>
      <property>
        <name>dfs.blocksize</name>
        <value>268435456</value>
      </property>
      <property>
        <name>dfs.namenode.handler.count</name>
        <value>100</value>
      </property>  
    </configuration>
    
     
     
     etc/hadoop/yarn-site.xml
     <configuration>
      <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop1</value>
      </property>
      <property>
        <name>yarn.nodemanager.hostname</name>
        <value>hadoop1</value>
      </property> 
    </configuration>
    
    
    etc/hadoop/mapred-site.xml
    
    
    <configuration>
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
    </configuration>
    
    
    Slaves File
    List all slave hostnames or IP addresses in your etc/hadoop/slaves file, one per line. 
    
    vim  etc/hadoop/slaves 
    hadoop1
    hadoop2
    hadoop3
    
    Copy the folder to the other nodes:
    scp -r /home/hadoop-2.9.1 hadoop2:/home/;
    scp -r /home/hadoop-2.9.1 hadoop1:/home/;
    
    
    Efficiency
    HADOOP_PREFIX=/home/hadoop-2.9.1;export HADOOP_PREFIX;
    【
    ssh hadoop2 "HADOOP_PREFIX=/home/hadoop-2.9.1;export HADOOP_PREFIX;";ssh hadoop1 "HADOOP_PREFIX=/home/hadoop-2.9.1;export HADOOP_PREFIX;";
    did not take effect: each ssh runs the export in its own short-lived shell that exits immediately, so nothing persists on the remote node.】
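
    To make the variable stick on the remote nodes, append it to a login startup file instead. A sketch:

    for h in hadoop1 hadoop2; do
      ssh "$h" 'echo "HADOOP_PREFIX=/home/hadoop-2.9.1; export HADOOP_PREFIX;" >> ~/.bash_profile'
    done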
    
    $HADOOP_PREFIX/bin/hdfs namenode -format my_cluster_name
    Check with printenv.
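    For example:

    printenv | grep -E 'HADOOP|JAVA_HOME'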
    HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop;export HADOOP_CONF_DIR;
    JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.171-8.b10.el7_5.x86_64/jre; export JAVA_HOME;
    
    $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode 
    
    HADOOP_YARN_HOME=$HADOOP_PREFIX/;export HADOOP_YARN_HOME;
    

      

    Cluster ID mismatch (Incompatible clusterIDs)

    2018-07-18 09:11:32,098 WARN org.apache.hadoop.hdfs.server.common.Storage: Failed to add storage directory [DISK]file:/hadoop-2.9.1/mydata/datanode/
    java.io.IOException: Incompatible clusterIDs in /hadoop-2.9.1/mydata/datanode: namenode clusterID = CID-a6680204-4513-4ebc-b1eb-88be2c9cf9bc; datanode clusterID = CID-180160f6-f2cf-44c4-83eb-66e8164d99b5
    	at org.apache.hadoop.hdfs.server.datanode.DataStorage.doTransition(DataStorage.java:760)
    	at org.apache.hadoop.hdfs.server.datanode.DataStorage.loadStorageDirectory(DataStorage.java:293)
    	at org.apache.hadoop.hdfs.server.datanode.DataStorage.loadDataStorage(DataStorage.java:409)
    	at org.apache.hadoop.hdfs.server.datanode.DataStorage.addStorageLocations(DataStorage.java:388)
    	at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:556)
    	at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1649)
    	at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1610)
    	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:388)
    	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:280)
    	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:816)
    	at java.lang.Thread.run(Thread.java:748)
    2018-07-18 09:11:32,101 ERROR org.apache.hadoop.hdfs.server.datanode.DataNode: Initialization failed for Block pool <registering> (Datanode Uuid f4b5087b-1763-4d99-88f8-cc934716fc1a) service to hadoop1/192.168.3.101:9001. Exiting. 
    java.io.IOException: All specified directories have failed to load.
    	at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:557)
    	at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1649)
    	at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1610)
    	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:388)
    	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:280)
    	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:816)
    	at java.lang.Thread.run(Thread.java:748)
    2018-07-18 09:11:32,101 WARN org.apache.hadoop.hdfs.server.datanode.DataNode: Ending block pool service for: Block pool <registering> (Datanode Uuid f4b5087b-1763-4d99-88f8-cc934716fc1a) service to hadoop1/192.168.3.101:9001
    2018-07-18 09:11:32,202 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Removed Block pool <registering> (Datanode Uuid f4b5087b-1763-4d99-88f8-cc934716fc1a)
    
    ssh hadoop1 "rm -rf /home/hadoop-2.9.1/mydata/namenode/*;rm -rf /home/hadoop-2.9.1/mydata/datanode/*;";
    ssh hadoop2 "rm -rf /home/hadoop-2.9.1/mydata/namenode/*;rm -rf /home/hadoop-2.9.1/mydata/datanode/*;";
    ssh hadoop3 "rm -rf /home/hadoop-2.9.1/mydata/namenode/*;rm -rf /home/hadoop-2.9.1/mydata/datanode/*;";
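
    The mismatch appears because hdfs namenode -format mints a fresh clusterID while each DataNode keeps the old one in its VERSION file. Before wiping, the two IDs can be compared. A sketch, with the data directories configured above:

    grep clusterID /home/hadoop-2.9.1/mydata/namenode/current/VERSION
    grep clusterID /home/hadoop-2.9.1/mydata/datanode/current/VERSION
    # If they differ, clearing the datanode directory forces re-registration under the new cluster ID.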
    

      

    Check the files:

    .bashrc .bash_profile

    [root@d1 ~]# cat ~/.bash_profile
    # .bash_profile

    # Get the aliases and functions
    if [ -f ~/.bashrc ]; then
    . ~/.bashrc
    fi

    # User specific environment and startup programs

    PATH=$PATH:$HOME/bin

    export PATH

    JAVA_HOME=/usr/local/jdk;export JAVA_HOME;

    HADOOP_PREFIX=/home/hadoop-2.9.1;export HADOOP_PREFIX;
    HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop;export HADOOP_CONF_DIR;
    HADOOP_HOME=/home/hadoop-2.9.1;export HADOOP_HOME;
    HADOOP_YARN_HOME=$HADOOP_PREFIX;export HADOOP_YARN_HOME;
    [root@d1 ~]#

    source  ~/.bash_profile;

    scp   ~/.bash_profile root@d2:~/;

    ssh  d2  "source ~/.bash_profile";   # only affects that one ssh session; new login shells read the copied file anyway

    [root@d1 ~]# cd $HADOOP_HOME
    [root@d1 hadoop-2.9.1]# cat myCleanStart.sh 
    #【stop】
    $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
    $HADOOP_PREFIX/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
    $HADOOP_PREFIX/sbin/stop-dfs.sh
    $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
    $HADOOP_YARN_HOME/sbin/yarn-daemons.sh --config $HADOOP_CONF_DIR stop nodemanager
    $HADOOP_PREFIX/sbin/stop-yarn.sh
    ssh d3 "rm -rf /home/hadoop-2.9.1/mydata/namenode/* /home/hadoop-2.9.1/mydata/datanode/* /home/hadoop-2.9.1/logs/*"
    ssh d2 "rm -rf /home/hadoop-2.9.1/mydata/namenode/* /home/hadoop-2.9.1/mydata/datanode/* /home/hadoop-2.9.1/logs/*"
    ssh d1 "rm -rf /home/hadoop-2.9.1/mydata/namenode/* /home/hadoop-2.9.1/mydata/datanode/* /home/hadoop-2.9.1/logs/*"
    
    #【del】
    ssh d3 "rm -rf /home/hadoop-2.9.1/mydata/namenode/* /home/hadoop-2.9.1/mydata/datanode/* /home/hadoop-2.9.1/logs/*"
    ssh d2 "rm -rf /home/hadoop-2.9.1/mydata/namenode/* /home/hadoop-2.9.1/mydata/datanode/* /home/hadoop-2.9.1/logs/*"
    ssh d1 "rm -rf /home/hadoop-2.9.1/mydata/namenode/* /home/hadoop-2.9.1/mydata/datanode/* /home/hadoop-2.9.1/logs/*"
    
    
    
    #【start】
    $HADOOP_PREFIX/bin/hdfs namenode -format mycluster_name
    $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
    $HADOOP_PREFIX/sbin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
    $HADOOP_PREFIX/sbin/start-dfs.sh
    $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
    $HADOOP_YARN_HOME/sbin/yarn-daemons.sh --config $HADOOP_CONF_DIR start nodemanager
    
    
    [root@d1 hadoop-2.9.1]# 
    

      

    1.1.1.182 cmd
    1.1.1.88 hadoop-name
    1.1.1.89 hadoop-data-a
    1.1.1.90 hadoop-data-b
    
    
    scp /etc/hosts root@hadoop-name:/etc/;scp /etc/hosts root@hadoop-data-a:/etc/;scp /etc/hosts root@hadoop-data-b:/etc/;
    
    
    # cd ~;ssh-keygen -t rsa; 
    
    ssh cmd "rm -rf ~/.ssh;ls ~/"; ssh hadoop-name "rm -rf ~/.ssh;ls ~/";ssh hadoop-data-a "rm -rf ~/.ssh;ls ~/";ssh hadoop-data-b "rm -rf ~/.ssh;ls ~/";
    
    # TODO --> script; a consolidated sketch follows after this block
     ssh hadoop-name; 
     hostname hadoop-name;cd ~;ssh-keygen -t rsa; scp ~/.ssh/id_rsa.pub root@cmd:~/.ssh/authorized_keys_`hostname`;
     ssh hadoop-data-a;
     hostname hadoop-data-a;cd ~;ssh-keygen -t rsa; scp ~/.ssh/id_rsa.pub root@cmd:~/.ssh/authorized_keys_`hostname`;
    
     ssh hadoop-data-b; 
     hostname hadoop-data-b;cd ~;ssh-keygen -t rsa; scp ~/.ssh/id_rsa.pub root@cmd:~/.ssh/authorized_keys_`hostname`;
    
    ssh cmd;
    cd ~/.ssh/;cat authorized_keys_*>>authorized_keys; cat id_rsa.pub>>authorized_keys; chmod 400 authorized_keys;
    scp authorized_keys root@hadoop-name:~/.ssh/;scp authorized_keys root@hadoop-data-a:~/.ssh/;scp authorized_keys root@hadoop-data-b:~/.ssh/;
    scp known_hosts root@hadoop-name:~/.ssh/;scp known_hosts root@hadoop-data-a:~/.ssh/;scp known_hosts root@hadoop-data-b:~/.ssh/;
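
    The TODO above could collapse into a single script run from cmd. A hedged sketch, assuming password logins still work for this initial round and the host list matches /etc/hosts:

    #!/bin/bash
    # Hypothetical helper: regenerate keys on each node, collect the public keys on cmd, redistribute.
    HOSTS="hadoop-name hadoop-data-a hadoop-data-b"
    for h in $HOSTS; do
      ssh -t "root@$h" "hostname $h; rm -rf ~/.ssh; mkdir -p ~/.ssh; ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; scp ~/.ssh/id_rsa.pub root@cmd:~/.ssh/authorized_keys_$h"
    done
    cd ~/.ssh
    cat authorized_keys_* id_rsa.pub >> authorized_keys
    chmod 400 authorized_keys
    for h in $HOSTS; do
      scp authorized_keys known_hosts "root@$h:~/.ssh/"
    done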
    

      

    tail hadoop-hdp-datanode-hadoop-name.log
            at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:2799)
            at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:2714)
            at org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode(DataNode.java:2756)
            at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.java:2900)
            at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2924)
    2019-11-07 13:14:44,477 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1: org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 0, volumes configured: 1, volumes failed: 1, volume failures tolerated: 0
    2019-11-07 13:14:44,482 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: SHUTDOWN_MSG:
    /************************************************************
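
    "Too many failed volumes ... volumes configured: 1, volumes failed: 1, volume failures tolerated: 0" means the single dfs.datanode.data.dir could not be used at all; check the path, that the directory exists, and its permissions first. On DataNodes with several disks the tolerance is configurable in hdfs-site.xml (a sketch; the value 1 is illustrative and must stay below the number of configured volumes):

    <property>
      <name>dfs.datanode.failed.volumes.tolerated</name>
      <value>1</value>
    </property>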
    

      

    http://www.firefoxbug.com/index.php/archives/2424/
