1. Set the hostname
hostnamectl set-hostname master
bash
2. Replace the local yum repository
pkill -9 yum //kill any running yum process
cd /etc/yum.repos.d
cat bigdata.repo //inspect the current repo file
rm -rf * //remove the old repo files
wget http://172.19.47.240/bigdata/repofile/bigdata.repo //download the local repo file
cat bigdata.repo //verify the new repo file
yum clean all //clear the old yum cache
3. Edit the hosts mapping file
vi /etc/hosts
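The notes do not show the file contents; a typical layout maps each node's IP to its hostname. The IPs below are placeholders, not taken from the original notes, and must be replaced with the cluster's real addresses:
192.168.1.10 master
192.168.1.11 slave01
192.168.1.12 slave02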
4. Time synchronization
tzselect
vim /etc/profile //append TZ='Asia/Shanghai'; export TZ to the file
source /etc/profile
date
5. Install ntp
yum install -y ntp
6. Comment out the default servers, set master as the local clock source, and set the stratum to 10
vim /etc/ntp.conf //edit the configuration file
Comment out:
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
Add:
server 127.127.1.0 #local clock
fudge 127.127.1.0 stratum 10
7. Sync the time every 30 minutes between 8:00 and 17:00
systemctl status ntpd.service //check whether ntpd is running
systemctl start ntpd.service //start the ntpd service on master
On slave01, sync against master:
ntpdate master
crontab -e //schedule the job
*/30 8-17 * * * /usr/sbin/ntpdate master
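Two optional checks: ntpq -p on master should list the 127.127.1.0 local clock as a peer, and crontab -l on the slave node should print the scheduled entry.
ntpq -p //on master: verify the local clock source is active
crontab -l //on the slave: verify the */30 8-17 job is installed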
8. Configure passwordless SSH login
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa //generate a key pair
cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys //append the public key to authorized_keys
scp ~/.ssh/authorized_keys root@slave01:~/.ssh/ //copy the file to the slave node
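A quick test that passwordless login works, plus the same copy for slave02 if it is part of the cluster (the original notes only show slave01, so treat the second command as an assumption):
ssh slave01 date //should print the date without asking for a password
scp ~/.ssh/authorized_keys root@slave02:~/.ssh/ //repeat for slave02 if present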
9. Install the JDK
mkdir -p /usr/java
cd /usr/java
wget http://172.16.47.240/bigdata/bigdata_tar/jdk-8u171-linux-x64.tar.gz
tar -zxvf jdk-8u171-linux-x64.tar.gz -C /usr/java/
rm -rf /usr/java/jdk-8u171-linux-x64.tar.gz
vi /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_171
export CLASSPATH=$JAVA_HOME/lib/
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile
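Verify that the JDK is on the PATH:
java -version //should report version 1.8.0_171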
10. Install ZooKeeper
mkdir -p /usr/zookeeper && cd /usr/zookeeper
wget http://172.16.47.240/bigdata/bigdata_tar/zookeeper-3.4.10.tar.gz
tar -zxvf /usr/zookeeper/zookeeper-3.4.10.tar.gz -C /usr/zookeeper
rm -rf /usr/zookeeper/zookeeper-3.4.10.tar.gz
vi /etc/profile
export ZOOKEEPER_HOME=/usr/zookeeper/zookeeper-3.4.10
export PATH=$PATH:$ZOOKEEPER_HOME/bin
source /etc/profile
cd /usr/zookeeper/zookeeper-3.4.10/conf && mv zoo_sample.cfg zoo.cfg && vi zoo.cfg
Write:
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/zookeeper/zookeeper-3.4.10/zkdata
clientPort=2181
dataLogDir=/usr/zookeeper/zookeeper-3.4.10/zkdatalog
server.1=master:2888:3888
server.2=slave01:2888:3888
server.3=slave02:2888:3888
cd /usr/zookeeper/zookeeper-3.4.10
mkdir zkdata zkdatalog
cd zkdata
vi myid
Write:
1
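If ZooKeeper was unpacked and configured only on master, the tree has to reach the slaves before their myid files can be written; one way to do that (not shown in the original notes, so treat it as an assumption) is:
scp -r /usr/zookeeper root@slave01:/usr/
scp -r /usr/zookeeper root@slave02:/usr/
scp /etc/profile root@slave01:/etc/ && scp /etc/profile root@slave02:/etc/ //then run source /etc/profile on each slave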
On slave01:
cd /usr/zookeeper/zookeeper-3.4.10/zkdata
echo 2 > myid
On slave02:
cd /usr/zookeeper/zookeeper-3.4.10/zkdata
echo 3 > myid
Start (on every node):
zkServer.sh start
zkServer.sh status
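With all three nodes started, zkServer.sh status should report Mode: leader on exactly one node and Mode: follower on the other two; jps should show a QuorumPeerMain process on each node.
jps //QuorumPeerMain should be listed on every node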
Check error output if needed (zookeeper.out is created in the directory from which zkServer.sh was started):
cd zkdata
cat zookeeper.out
11. Install Hadoop
mkdir -p /usr/hadoop && cd /usr/hadoop
wget http://172.16.47.240/bigdata/bigdata_tar/hadoop-2.7.3.tar.gz
tar -zxvf /usr/hadoop/hadoop-2.7.3.tar.gz -C /usr/hadoop
rm -rf /usr/hadoop/hadoop-2.7.3.tar.gz
vi /etc/profile
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
cd $HADOOP_HOME/etc/hadoop
vi hadoop-env.sh //set JAVA_HOME (export JAVA_HOME=/usr/java/jdk1.8.0_171)
vi core-site.xml //default filesystem and temp dir (a sketch is given below)
vi hdfs-site.xml //replication factor and storage directories (a sketch is given below)
vi slaves //list the worker nodes, one per line
vi yarn-env.sh
echo "export JAVA_HOME=/usr/java/jdk1.8.0_171" >> yarn-env.sh
Core configuration of the YARN framework
vi yarn-site.xml
<!-- address of the ResourceManager -->
<property>
<name>yarn.resourcemanager.address</name>
<value>master:18040</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:18030</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:18088</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:18025</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:10141</value>
</property>
<!-- how reducers fetch data -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
cp mapred-site.xml.template mapred-site.xml && vi mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
Set master as the master node, slave01 and slave02 as worker nodes:
echo master > master
echo slave01 > slaves && echo slave02 >> slaves
12. Format the NameNode and start the cluster
cd $HADOOP_HOME
hadoop namenode -format
start-all.sh
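After start-all.sh, jps is the usual sanity check. With this layout master is expected to show NameNode, SecondaryNameNode and ResourceManager, and each slave DataNode and NodeManager; the exact placement of SecondaryNameNode depends on the masters file, so treat the list as an expectation rather than a guarantee.
jps //run on master and on each slave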
13. Install MySQL
yum -y install mysql-community-server
systemctl start mysqld //start MySQL
grep "temporary password" /var/log/mysqld.log //find the temporary root password
mysql -u root -p
set global validate_password_policy=0; //lower the password policy
set global validate_password_length=4;
alter user 'root'@'localhost' identified by '123456';
create user 'root'@'%' identified by '123456'; //create a user that can log in from any host
grant all privileges on *.* to 'root'@'%' with grant option; //allow remote connections
flush privileges; //reload the privilege tables
create database hongyaa;
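An optional check, run from one of the slave nodes (it assumes a MySQL client is installed there), that remote root login works:
mysql -h master -uroot -p123456 -e "show databases;"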
14. Install Hive
mkdir -p /usr/hive && cd /usr/hive
wget http://172.16.47.240/bigdata/bigdata_tar/apache-hive-2.1.1-bin.tar.gz
tar -zxvf /usr/hive/apache-hive-2.1.1-bin.tar.gz -C /usr/hive/
rm -rf /usr/hive/apache-hive-2.1.1-bin.tar.gz
vi /etc/profile
export HIVE_HOME=/usr/hive/apache-hive-2.1.1-bin
export PATH=$PATH:$HIVE_HOME/bin
source /etc/profile
Configure the Hive runtime environment
cd $HIVE_HOME/conf && cp hive-env.sh.template hive-env.sh && vi hive-env.sh
# Hadoop installation path
export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
# Hive configuration directory
export HIVE_CONF_DIR=/usr/hive/apache-hive-2.1.1-bin/conf
# Hive auxiliary jars path
export HIVE_AUX_JARS_PATH=/usr/hive/apache-hive-2.1.1-bin/lib
Resolve the jline version conflict between Hive and Hadoop
cp $HIVE_HOME/lib/jline-2.12.jar $HADOOP_HOME/share/hadoop/yarn/lib/
15. Configure the MySQL connector and initialize the metastore
cd $HIVE_HOME/lib && wget http://172.16.47.240/bigdata/bigdata_tar/mysql-connector-java-5.1.47-bin.jar
cd $HIVE_HOME/conf
vi hive-site.xml
Write the configuration file (a minimal sketch is given below)
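A minimal hive-site.xml sketch for a MySQL-backed metastore, using standard property names; the connection URL reuses the hongyaa database and the 123456 password from step 13, so adjust these values if the real setup differs:
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master:3306/hongyaa?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/user/hive/warehouse</value>
</property>
</configuration>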
cd $HIVE_HOME
schematool -dbType mysql -initSchema //initialize the metastore schema
hive --service metastore //start the metastore service
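With the metastore running in the foreground, the Hive CLI can be tested from a second terminal:
hive
show databases; //should list at least the default database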
Convert to JSON
JSON.stringify(option)