Manual Big Data Setup Steps (Part 3): Zookeeper, Hadoop


    Covered here: Zookeeper, Hadoop

    Each line outside the dashed sections is a shell command; the text between ---- lines is the content to write into the file that was just opened.

    Video: Manually building a big data environment [4]: Java, Hadoop, Scala, Spark

[Zookeeper]

vi /etc/hosts

------------------------------------ append these master.root-style aliases at the end
192.168.15.104 master master.root
192.168.15.127 slave1 slave1.root
192.168.15.124 slave2 slave2.root
------------------------------------
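The same three entries must be present on slave1 and slave2 as well. A minimal sketch for pushing them out, assuming root SSH between the nodes is already set up (the later scp steps assume the same):

scp /etc/hosts root@slave1:/etc/hosts
scp /etc/hosts root@slave2:/etc/hosts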
    14 
mkdir -p /usr/zookeeper

tar -zxvf /opt/soft/zookeeper-3.4.10.tar.gz -C /usr/zookeeper/

cd /usr/zookeeper/zookeeper-3.4.10/conf/

vi zoo.cfg

---------------------------------------- make sure nothing gets dropped when you paste this in
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/zookeeper/zookeeper-3.4.10/zkdata
clientPort=2181
dataLogDir=/usr/zookeeper/zookeeper-3.4.10/zkdatalog
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
------------------------------------------
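In each server.N line, 2888 is the port followers use to talk to the leader and 3888 is the leader-election port; N must match the myid written on that host in the next step. Once the myid files exist, a quick cross-check (a sketch):

cat /usr/zookeeper/zookeeper-3.4.10/zkdata/myid    # on master (server.1) this should print 1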
    44 
zookeeper-3.4.10]# mkdir zkdata
zookeeper-3.4.10]# mkdir zkdatalog
zookeeper-3.4.10]# cd zkdata
zkdata]# vi myid
----------------------
1
-----------------------

scp -r /usr/zookeeper root@slave1:/usr/zookeeper
scp -r /usr/zookeeper root@slave2:/usr/zookeeper

On each slave, edit its copy of myid:

zkdata]# vi myid
---------------------- 2 on slave1, 3 on slave2, and so on
2
-----------------------
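Instead of logging in to each slave, the two myid edits can also be done from master in one go; a minimal sketch, assuming passwordless root SSH:

ssh root@slave1 'echo 2 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid'
ssh root@slave2 'echo 3 > /usr/zookeeper/zookeeper-3.4.10/zkdata/myid'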
    70 
vi /etc/profile

--------------------------------------------------------
#set zookeeper environment
export ZOOKEEPER_HOME=/usr/zookeeper/zookeeper-3.4.10
PATH=$PATH:$ZOOKEEPER_HOME/bin
---------------------------------------------------------
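The PATH change only applies to new shells; load it into the current one before starting ZooKeeper:

source /etc/profile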
    82 
zookeeper-3.4.10]# bin/zkServer.sh start    --> run this on slave1 and slave2 as well
zookeeper-3.4.10]# bin/zkServer.sh status
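With all three servers started, status should report one node as leader and the other two as follower. The 3.4.x four-letter-word commands give the same information over the client port; a sketch, assuming nc/netcat is installed and the 4lw commands are enabled (the 3.4.x default):

echo ruok | nc master 2181    # a healthy server answers "imok"
echo stat | nc master 2181    # prints the server's mode (leader or follower)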
[Hadoop]

mkdir -p /usr/hadoop

tar -zxvf /opt/soft/hadoop-2.7.3.tar.gz -C /usr/hadoop/

vi /etc/profile

---------------------------------------------------------
#HADOOP
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export CLASSPATH=$CLASSPATH:$HADOOP_HOME/lib
export PATH=$PATH:$HADOOP_HOME/bin
---------------------------------------------------------
     20 
source /etc/profile

cd /usr/hadoop/hadoop-2.7.3/etc/hadoop

hadoop]# vi hadoop-env.sh
-------------------------------------------
export JAVA_HOME=/usr/java/jdk1.8.0_201
-------------------------------------------
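At this point the hadoop command should resolve and report its version; a quick sanity check, assuming JAVA_HOME was already exported during the earlier JDK setup:

hadoop version    # should print Hadoop 2.7.3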
     32 
hadoop]# leafpad core-site.xml

------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://master:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/hadoop/hadoop-2.7.3/hdfs/tmp</value>
    <description>A base for other temporary directories.</description>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <name>fs.checkpoint.period</name>
    <value>60</value>
  </property>
  <property>
    <name>fs.checkpoint.size</name>
    <value>67108864</value>
  </property>
</configuration>
-----------------------------------------
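fs.default.name is the deprecated 1.x spelling of this key; Hadoop 2.7.3 still honors it but logs a deprecation warning in favor of fs.defaultFS. Once the config is in place you can confirm the effective value from any node (a sketch):

hdfs getconf -confKey fs.defaultFS    # should print hdfs://master:9000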
     88 
hadoop]# vi yarn-site.xml

----------------------------------------------
<?xml version="1.0"?>
<configuration>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>master:18040</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:18030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>master:18088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:18025</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:18141</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
</configuration>
----------------------------------------------
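These 18xxx values override the stock ResourceManager ports (8032, 8030, 8088, 8031 and 8033 by default), so the YARN web UI will live at 18088 rather than 8088. After start-all.sh you can poke it from any node; a sketch, assuming curl is installed:

curl -s http://master:18088/cluster | head -n 5    # should return the cluster overview page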
    156 
hadoop]# vi hdfs-site.xml

------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/usr/hadoop/hadoop-2.7.3/hdfs/name</value>
    <final>true</final>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/usr/hadoop/hadoop-2.7.3/hdfs/data</value>
    <final>true</final>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:9001</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
</configuration>
------------------------------
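dfs.replication=2 matches the two DataNodes this cluster has. Once the daemons are up, both should register as live; a quick check (a sketch):

hdfs dfsadmin -report | grep 'Live datanodes'    # expect: Live datanodes (2):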
    222 
hadoop]# vi mapred-site.xml

-----------------------------------------
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
------------------------------------------
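A stock 2.7.3 tarball ships only mapred-site.xml.template, so the file opened above does not exist yet. Letting vi create it works; copying the template first keeps the upstream comments (either way is fine):

hadoop]# cp mapred-site.xml.template mapred-site.xml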
    244 
hadoop]# vi slaves
--------------------
slave1
slave2
--------------------

hadoop]# vi master
------------------
master
------------------

scp -r /usr/hadoop root@slave1:/usr/hadoop
scp -r /usr/hadoop root@slave2:/usr/hadoop
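Note that /etc/profile was only edited on master. The daemons themselves are started over SSH and only need the JAVA_HOME from hadoop-env.sh, but if you also want the hadoop command on the slaves' PATH, copy the profile out too (a sketch), then source it in any open shells there:

scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile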
    266 
hadoop]# hadoop namenode -format    ---> should finish with "Exiting with status 0"
hadoop]# cd ..
etc]# cd ..
hadoop-2.7.3]# sbin/start-all.sh    --> run on master; it brings up the other nodes over SSH
hadoop-2.7.3]# jps

----> on master: SecondaryNameNode, ResourceManager, NameNode
----> on slave1/slave2: DataNode, NodeManager

Access the cluster: http://masterip:50070 (replace masterip with the master's actual IP).
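A classic end-to-end smoke test, run from the hadoop-2.7.3 directory on master (a sketch; the examples jar path is the one shipped with 2.7.3):

hadoop-2.7.3]# bin/hdfs dfs -mkdir -p /input
hadoop-2.7.3]# bin/hdfs dfs -put /etc/hosts /input/
hadoop-2.7.3]# bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount /input /output
hadoop-2.7.3]# bin/hdfs dfs -cat /output/part-r-00000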
Original post: https://www.cnblogs.com/blackicelisa/p/12263734.html