创建kubelet配置
• set-cluster # 创建需要连接的集群信息,可以创建多个k8s集群信息 [root@hdss7-21 ~]# kubectl config set-cluster myk8s --certificate-authority=/opt/apps/kubernetes/server/bin/certs/ca.pem --embed-certs=true --server=https://10.4.7.10:7443 --kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig • set-credentials # 创建用户账号,即用户登录使用的客户端私钥和证书,可以创建多个证书 [root@hdss7-21 ~]# kubectl config set-credentials k8s-node --client-certificate=/opt/apps/kubernetes/server/bin/certs/client.pem --client-key=/opt/apps/kubernetes/server/bin/certs/client-key.pem --embed-certs=true --kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig • set-context # 设置context,即确定账号和集群对应关系 [root@hdss7-21 ~]# kubectl config set-context myk8s-context --cluster=myk8s --user=k8s-node --kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig • use-context # 设置当前使用哪个context [root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=/opt/apps/kubernetes/conf/kubelet.kubeconfig
授权k8s-node用户
授权 k8s-node 用户绑定集群角色 system:node,使 k8s-node 用户具备运算节点的权限。
[root@hdss7-21 ~]# vim k8s-node.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: k8s-node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:node subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: k8s-node [root@hdss7-21 ~]# kubectl create -f k8s-node.yaml clusterrolebinding.rbac.authorization.k8s.io/k8s-node created [root@hdss7-21 ~]# kubectl get clusterrolebinding k8s-node NAME AGE k8s-node 36s
创建启动脚本
[root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kubelet-startup.sh #!/bin/sh WORK_DIR=$(dirname $(readlink -f $0)) [ $? -eq 0 ] && cd $WORK_DIR || exit /opt/apps/kubernetes/server/bin/kubelet --anonymous-auth=false --cgroup-driver systemd --cluster-dns 192.168.0.2 --cluster-domain cluster.local --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice --fail-swap-on="false" --client-ca-file ./certs/ca.pem --tls-cert-file ./certs/kubelet.pem --tls-private-key-file ./certs/kubelet-key.pem --hostname-override hdss7-21.host.com --image-gc-high-threshold 20 --image-gc-low-threshold 10 --kubeconfig ../../conf/kubelet.kubeconfig --log-dir /data/logs/kubernetes/kube-kubelet --pod-infra-container-image harbor.od.com/public/pause:latest --root-dir /data/kubelet [root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kubelet-startup.sh [root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet [root@hdss7-21 ~]# vim /etc/supervisord.d/kube-kubelet.ini [program:kube-kubelet-7-21] command=/opt/apps/kubernetes/server/bin/kubelet-startup.sh numprocs=1 directory=/opt/apps/kubernetes/server/bin autostart=true autorestart=true startsecs=30 startretries=3 exitcodes=0,2 stopsignal=QUIT stopwaitsecs=10 user=root redirect_stderr=true stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log stdout_logfile_maxbytes=64MB stdout_logfile_backups=5 stdout_capture_maxbytes=1MB stdout_events_enabled=false [root@hdss7-21 ~]# supervisorctl update [root@hdss7-21 ~]# supervisorctl status etcd-server-7-21 RUNNING pid 23637, uptime 1 day, 14:56:25 kube-apiserver-7-21 RUNNING pid 32591, uptime 16:35:54 kube-controller-manager-7-21 RUNNING pid 33357, uptime 14:40:09 kube-kubelet-7-21 RUNNING pid 37232, uptime 0:01:08 kube-scheduler-7-21 RUNNING pid 33450, uptime 14:30:50 [root@hdss7-21 ~]# kubectl get node NAME STATUS ROLES AGE VERSION hdss7-21.host.com Ready <none> 3m13s v1.15.2 hdss7-22.host.com Ready <none> 3m13s v1.15.2
修改节点角色
[root@hdss7-21 ~]# kubectl get node NAME STATUS ROLES AGE VERSION hdss7-21.host.com Ready <none> 3m13s v1.15.2 hdss7-22.host.com Ready <none> 3m13s v1.15.2 [root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node= node/hdss7-21.host.com labeled [root@hdss7-21 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master= node/hdss7-21.host.com labeled [root@hdss7-21 ~]# kubectl label node hdss7-22.host.com node-role.kubernetes.io/master= node/hdss7-22.host.com labeled [root@hdss7-21 ~]# kubectl label node hdss7-22.host.com node-role.kubernetes.io/node= node/hdss7-22.host.com labeled [root@hdss7-21 ~]# kubectl get node NAME STATUS ROLES AGE VERSION hdss7-21.host.com Ready master,node 7m44s v1.15.2 hdss7-22.host.com Ready master,node 7m44s v1.15.2
创建kube-proxy配置
[root@hdss7-21 ~]# kubectl config set-cluster myk8s --certificate-authority=/opt/apps/kubernetes/server/bin/certs/ca.pem --embed-certs=true --server=https://10.4.7.10:7443 --kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig [root@hdss7-21 ~]# kubectl config set-credentials kube-proxy --client-certificate=/opt/apps/kubernetes/server/bin/certs/kube-proxy-client.pem --client-key=/opt/apps/kubernetes/server/bin/certs/kube-proxy-client-key.pem --embed-certs=true --kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig [root@hdss7-21 ~]# kubectl config set-context myk8s-context --cluster=myk8s --user=kube-proxy --kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig [root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=/opt/apps/kubernetes/conf/kube-proxy.kubeconfig
加装ipvs模块
[root@hdss7-21 ~]# for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done [root@hdss7-21 ~]# lsmod | grep ip_vs # 查看ipvs模块
创建启动脚本
[root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kube-proxy-startup.sh #!/bin/sh WORK_DIR=$(dirname $(readlink -f $0)) [ $? -eq 0 ] && cd $WORK_DIR || exit /opt/apps/kubernetes/server/bin/kube-proxy --cluster-cidr 172.7.0.0/16 --hostname-override hdss7-21.host.com --proxy-mode=ipvs --ipvs-scheduler=nq --kubeconfig ../../conf/kube-proxy.kubeconfig [root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kube-proxy-startup.sh [root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-proxy [root@hdss7-21 ~]# vim /etc/supervisord.d/kube-proxy.ini [program:kube-proxy-7-21] command=/opt/apps/kubernetes/server/bin/kube-proxy-startup.sh numprocs=1 directory=/opt/apps/kubernetes/server/bin autostart=true autorestart=true startsecs=30 startretries=3 exitcodes=0,2 stopsignal=QUIT stopwaitsecs=10 user=root redirect_stderr=true stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log stdout_logfile_maxbytes=64MB stdout_logfile_backups=5 stdout_capture_maxbytes=1MB stdout_events_enabled=false [root@hdss7-21 ~]# supervisorctl update
验证集群
[root@hdss7-21 ~]# supervisorctl status etcd-server-7-21 RUNNING pid 23637, uptime 2 days, 0:27:18 kube-apiserver-7-21 RUNNING pid 32591, uptime 1 day, 2:06:47 kube-controller-manager-7-21 RUNNING pid 33357, uptime 1 day, 0:11:02 kube-kubelet-7-21 RUNNING pid 37232, uptime 9:32:01 kube-proxy-7-21 RUNNING pid 47088, uptime 0:06:19 kube-scheduler-7-21 RUNNING pid 33450, uptime 1 day, 0:01:43 [root@hdss7-21 ~]# yum install -y ipvsadm [root@hdss7-21 ~]# ipvsadm -Ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 192.168.0.1:443 nq -> 10.4.7.21:6443 Masq 1 0 0 -> 10.4.7.22:6443 Masq 1 0 0 [root@hdss7-21 ~]# curl -I 172.7.21.2 HTTP/1.1 200 OK Server: nginx/1.17.6 Date: Tue, 07 Jan 2020 14:28:46 GMT Content-Type: text/html Content-Length: 612 Last-Modified: Tue, 19 Nov 2019 12:50:08 GMT Connection: keep-alive ETag: "5dd3e500-264" Accept-Ranges: bytes [root@hdss7-21 ~]# curl -I 172.7.22.2 # 缺少网络插件,无法跨节点通信