1. Node planning
Node   | IP             | Hostname | Hardware
------ | -------------- | -------- | ------------------
master | 192.168.80.136 | master   | 2 GB RAM, CPU 2×1
node1  | 192.168.80.137 | node1    | 2 GB RAM, CPU 2×1
node2  | 192.168.80.138 | node2    | 2 GB RAM, CPU 2×1
etcd   | 192.168.80.139 | etcd     | 2 GB RAM, CPU 2×1
2. System configuration (all machines)
- Disable SELinux

# Disable temporarily
$ setenforce 0
# Disable permanently (takes effect after reboot)
$ vim /etc/selinux/config
SELINUX=disabled
- Stop and disable the firewall

$ systemctl stop firewalld
$ systemctl disable firewalld
- Change the hostname

$ vim /etc/hostname
# Set to master, node1, node2, and etcd on the four machines respectively
- Edit the hosts file

$ vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.80.136 master
192.168.80.137 node1
192.168.80.138 node2
192.168.80.139 etcd
- Set up passwordless SSH login

$ ssh-keygen          # press Enter to accept every default
$ ssh-copy-id {IP of each of the other machines}
- Update packages with yum

$ yum update
- Install the required components

$ yum install -y etcd kubernetes ntp flannel
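With all of the above in place on every machine, a quick sanity check catches typos before the cluster components come up. A minimal sketch using plain CentOS 7 tooling, with hostnames and IPs as planned in section 1:

$ getenforce                     # Permissive until reboot, Disabled afterwards
$ systemctl is-active firewalld  # expect: inactive
$ hostname                       # expect: this machine's assigned name
$ ping -c 1 node1                # /etc/hosts resolution; repeat for the other names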
3. Per-node configuration
- etcd server

# Edit the configuration file
$ vim /etc/etcd/etcd.conf
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://192.168.80.139:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.80.139:2379"

# Start the service
$ systemctl start etcd
$ systemctl enable etcd

# Check etcd cluster health
$ etcdctl cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://192.168.80.139:2379
cluster is healthy

# Set a key that flannel will read later
$ etcdctl set /k8s/network/config '{"Network": "10.255.0.0/16"}'
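It is worth confirming that this key is readable over the advertised client URL, since flanneld on each node will fetch it from there at startup. A minimal check, assuming the v2 etcdctl shipped with the etcd RPM:

$ etcdctl --endpoints=http://192.168.80.139:2379 get /k8s/network/config
# should echo back: {"Network": "10.255.0.0/16"}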
- master server

# Edit the common Kubernetes configuration file
$ vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.80.136:8080"

# Edit the kube-apiserver configuration file
$ vim /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.80.139:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=AlwaysAdmit"
KUBE_API_ARGS=""

# Edit the kube-scheduler configuration file
$ vim /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--address=0.0.0.0"

# Start the services
$ for SERVICES in kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl start $SERVICES
    systemctl enable $SERVICES
  done
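Once the three services are up, the control plane can be verified from the master itself; with the insecure port bound to 0.0.0.0, kubectl on the master talks to localhost:8080 without any extra configuration. A quick check (component names as reported by the yum-packaged 1.5.x release):

$ kubectl get componentstatuses                # scheduler, controller-manager and etcd-0 should report Healthy
$ curl -s http://192.168.80.136:8080/version   # the API server answers on the insecure port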
- node1

# Configure node1 networking (flannel)
$ vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.80.139:2379"
FLANNEL_ETCD_PREFIX="/k8s/network"
FLANNEL_OPTIONS="--iface=ens33"   # ens33 is the NIC that currently has network access

# Configure node1 kube-proxy
$ vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.80.136:8080"

$ vim /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--bind-address=0.0.0.0"

# Configure node1 kubelet
$ vim /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=127.0.0.1"
KUBELET_HOSTNAME="--hostname-override=node1"
KUBELET_API_SERVER="--api-servers=http://192.168.80.136:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""

# Start the node1 services (docker is restarted after flanneld so it picks up the overlay settings)
$ for SERVICES in flanneld kube-proxy kubelet docker; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
  done
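When flanneld starts it leases a subnet from the 10.255.0.0/16 range stored in etcd and records it on disk; with the default udp backend it also creates a flannel0 device. A quick check (the file path and device name below are the flannel 0.x defaults on CentOS 7):

$ cat /run/flannel/subnet.env   # FLANNEL_SUBNET should fall inside 10.255.0.0/16
$ ip addr show flannel0         # the overlay device carries an address from that subnet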
- node2

# Configure node2 networking (flannel)
$ vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.80.139:2379"
FLANNEL_ETCD_PREFIX="/k8s/network"
FLANNEL_OPTIONS="--iface=ens33"

# Configure node2 kube-proxy
$ vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.80.136:8080"

$ vim /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--bind-address=0.0.0.0"

# Configure node2 kubelet (hostname-override must be node2 here, not node1)
$ vim /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=127.0.0.1"
KUBELET_HOSTNAME="--hostname-override=node2"
KUBELET_API_SERVER="--api-servers=http://192.168.80.136:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""

# Start the node2 services
$ for SERVICES in flanneld kube-proxy kubelet docker; do
    systemctl restart $SERVICES
    systemctl enable $SERVICES
    systemctl status $SERVICES
  done
4. Test
[root@master ~]# kubectl get nodes
NAME      STATUS    AGE
node1     Ready     55m
node2     Ready     54m
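Beyond node registration, a throwaway workload exercises scheduling and the flannel overlay end to end. A minimal sketch, assuming the kubectl ~1.5 shipped by the CentOS kubernetes RPM (where kubectl run creates a deployment and still accepts --replicas):

# Start two nginx replicas, then check they are spread across the nodes
# with pod IPs from the 10.255.0.0/16 flannel range
[root@master ~]# kubectl run nginx --image=nginx --replicas=2
[root@master ~]# kubectl get pods -o wide
# Clean up afterwards
[root@master ~]# kubectl delete deployment nginx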
5. References
https://blog.csdn.net/lic95/article/details/55015284
https://www.kubernetes.org.cn/3096.html