K8s Deployment with kubeadm


    K8s Deployment Notes

    1. Cluster Environment

    Hostname                         IP address       Description
    k8s-master01                     192.168.1.107    master node
    k8s-master02                     192.168.1.108    master node
    k8s-master03                     192.168.1.109    master node
    k8s-master-lb (on the masters)   192.168.1.236    keepalived virtual IP
    k8s-node01                       192.168.1.110    worker node
    k8s-node02                       192.168.1.111    worker node

    Configuration     Notes
    OS version        CentOS 7.9
    Docker version    19.03.x
    Pod CIDR          172.168.0.0/12
    Service CIDR      10.96.0.0/12

    Note:
    The VIP (virtual IP) must not duplicate any IP already in use on the company LAN. Ping it first; it is usable only if the ping gets no reply. The VIP must also be on the same LAN segment as the hosts!
    

    2. Basic Environment Setup (run the following on all nodes)

    2.1 Configure hosts resolution

    [root@k8s-master01 ~]# cat /etc/hosts
    192.168.1.107 k8s-master01
    192.168.1.108 k8s-master02
    192.168.1.109 k8s-master03
    192.168.1.236 k8s-master-lb # for a non-HA cluster, use Master01's IP here
    192.168.1.110 k8s-node01
    192.168.1.111 k8s-node02
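    Since every node needs identical entries, one way to apply them is to append the block on each node (a sketch using the addresses above; adjust to your environment):
    cat >> /etc/hosts <<'EOF'
    192.168.1.107 k8s-master01
    192.168.1.108 k8s-master02
    192.168.1.109 k8s-master03
    192.168.1.236 k8s-master-lb
    192.168.1.110 k8s-node01
    192.168.1.111 k8s-node02
    EOF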
    

    2.2 Switch to the Aliyun yum repositories

    curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
    
    yum install -y yum-utils device-mapper-persistent-data lvm2
    
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
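    Rebuilding the yum cache afterwards is optional but avoids stale repository metadata:
    yum clean all && yum makecache fast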
    

    2.3 Install common tools

    yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git lrzsz -y
    

    2.4 Disable firewalld, SELinux, dnsmasq, and swap

    systemctl disable --now firewalld 
    systemctl disable --now dnsmasq
    systemctl disable --now NetworkManager
    
    setenforce 0
    sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
    sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
    
    # Disable the swap partition
    swapoff -a && sysctl -w vm.swappiness=0
    sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
    

    2.5 Time synchronization

    # Install ntpdate
    rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
    yum install ntpdate -y
    
    # Change the time zone
    ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
    
    # Record the time zone, sync once now, and keep it in sync with a cron job
    echo 'Asia/Shanghai' >/etc/timezone
    ntpdate time2.aliyun.com
    # Add the following entry to crontab (crontab -e)
    */5 * * * * ntpdate time2.aliyun.com
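    If you prefer to install the cron entry non-interactively instead of editing it with crontab -e, something like the following works (a sketch; /usr/sbin/ntpdate is assumed to be the install path, spelled out because cron's PATH is minimal):
    (crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -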
    

    2.6 Linux tuning

    ulimit -SHn 65535
    
    vim /etc/security/limits.conf
    # Append the following at the end of the file
    * soft nofile 655360
    * hard nofile 131072
    * soft nproc 655350
    * hard nproc 655350
    * soft memlock unlimited
    * hard memlock unlimited
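    If you prefer not to edit the file in vim, the same entries can be appended non-interactively (identical values to the list above):
    cat >> /etc/security/limits.conf <<'EOF'
    * soft nofile 655360
    * hard nofile 131072
    * soft nproc 655350
    * hard nproc 655350
    * soft memlock unlimited
    * hard memlock unlimited
    EOF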
    
    # Then reboot
    reboot
    

    2.7 Upgrade the system on all nodes and reboot. This step does not upgrade the kernel; the kernel is upgraded separately in the next section:

    # CentOS 7 should be upgraded; on CentOS 8 upgrade as needed
    yum update -y --exclude=kernel* && reboot 
    

    3. Kernel Upgrade

    3.1 Configure passwordless SSH (on Master01)

    Master01 logs in to the other nodes without a password. All configuration files and certificates generated during the installation are created on Master01, and the cluster is managed from Master01 as well; on Alibaba Cloud or AWS a separate kubectl host is needed. Set up the key as follows:
    # Just press Enter through all the prompts
    ssh-keygen -t rsa 
    
    for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
    

    3.2 Clone the installation source files (on Master01)

    cd /root/ ; git clone https://github.com/dotbalo/k8s-ha-install.git
    

    3.3 Download the kernel upgrade packages (on Master01)

    CentOS 7 needs its kernel upgraded to 4.18+; this guide upgrades to 4.19.
    # Download the kernel packages on the master01 node
    cd /root
    wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
    wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
    
    # Copy them from master01 to the other nodes:
    for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm $i:/root/ ; done
    

    3.4 Upgrade the kernel (all nodes)

    cd /root && yum localinstall -y kernel-ml*
    grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
    grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
    
    # Check that the default kernel is 4.19 (the command prints the path of the default boot entry)
    grubby --default-kernel
    # expected: /boot/vmlinuz-4.19.12-1.el7.elrepo.x86_64
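    Optionally, confirm that the user_namespace.enable=1 argument landed on the default entry (grubby --info prints the settings for a given kernel entry):
    grubby --info="$(grubby --default-kernel)" | grep -i args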
    
    # Reboot all nodes, then verify the running kernel is 4.19
    reboot
    [root@k8s-node02 ~]# uname -a
    Linux k8s-node02 4.19.12-1.el7.elrepo.x86_64 #1 SMP Fri Dec 21 11:06:36 EST 2018 x86_64 x86_64 x86_64 GNU/Linux
    

    3.5 Install ipvsadm (all nodes)

    yum install ipvsadm ipset sysstat conntrack libseccomp -y
    
    Configure the ipvs modules on all nodes. On kernel 4.19+ nf_conntrack_ipv4 has been renamed to nf_conntrack; on 4.18 and below keep using nf_conntrack_ipv4:
    vim /etc/modules-load.d/ipvs.conf 
    # Add the following content
    ip_vs
    ip_vs_lc
    ip_vs_wlc
    ip_vs_rr
    ip_vs_wrr
    ip_vs_lblc
    ip_vs_lblcr
    ip_vs_dh
    ip_vs_sh
    ip_vs_fo
    ip_vs_nq
    ip_vs_sed
    ip_vs_ftp
    ip_vs_sh
    nf_conntrack   # on kernel 4.18 and below, use nf_conntrack_ipv4 instead
    ip_tables
    ip_set
    xt_set
    ipt_set
    ipt_rpfilter
    ipt_REJECT
    ipip
    
    # Then enable the module-load service
    systemctl enable --now systemd-modules-load.service
    

    3.6 Enable kernel parameters required by a Kubernetes cluster (all nodes)

    cat <<EOF > /etc/sysctl.d/k8s.conf
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    fs.may_detach_mounts = 1
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    fs.inotify.max_user_watches=89100
    fs.file-max=52706963
    fs.nr_open=52706963
    net.netfilter.nf_conntrack_max=2310720
    
    net.ipv4.tcp_keepalive_time = 600
    net.ipv4.tcp_keepalive_probes = 3
    net.ipv4.tcp_keepalive_intvl =15
    net.ipv4.tcp_max_tw_buckets = 36000
    net.ipv4.tcp_tw_reuse = 1
    net.ipv4.tcp_max_orphans = 327680
    net.ipv4.tcp_orphan_retries = 3
    net.ipv4.tcp_syncookies = 1
    net.ipv4.tcp_max_syn_backlog = 16384
    net.ipv4.ip_conntrack_max = 65536
    net.ipv4.tcp_max_syn_backlog = 16384
    net.ipv4.tcp_timestamps = 0
    net.core.somaxconn = 16384
    EOF
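    The new settings can also be applied right away without waiting for the reboot (optional; a few conntrack keys only resolve once the modules from 3.5 are loaded):
    sysctl --system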
    
    # After configuring the kernel parameters on all nodes, reboot and make sure the modules are still loaded after the reboot
    reboot
    [root@k8s-master01 ~]# lsmod | grep --color=auto -e ip_vs -e nf_conntrack
    ip_vs_ftp              16384  0 
    nf_nat                 32768  1 ip_vs_ftp
    ip_vs_sed              16384  0 
    ip_vs_nq               16384  0 
    ip_vs_fo               16384  0 
    ip_vs_sh               16384  0 
    ip_vs_dh               16384  0 
    ip_vs_lblcr            16384  0 
    ip_vs_lblc             16384  0 
    ip_vs_wrr              16384  0 
    ip_vs_rr               16384  0 
    ip_vs_wlc              16384  0 
    ip_vs_lc               16384  0 
    ip_vs                 151552  24 ip_vs_wlc,ip_vs_rr,ip_vs_dh,ip_vs_lblcr,ip_vs_sh,ip_vs_fo,ip_vs_nq,ip_vs_lblc,ip_vs_wrr,ip_vs_lc,ip_vs_sed,ip_vs_ftp
    nf_conntrack          143360  2 nf_nat,ip_vs
    nf_defrag_ipv6         20480  1 nf_conntrack
    nf_defrag_ipv4         16384  1 nf_conntrack
    libcrc32c              16384  4 nf_conntrack,nf_nat,xfs,ip_vs
    

    4. Installing Base Components

    This section installs the various components used by the cluster, such as Docker CE and the Kubernetes packages.

    4.1 Install Docker CE 19.03 (all nodes)

    yum install docker-ce-19.03.* -y
    
    4.1.1 Tip:
    Newer kubelet versions recommend the systemd cgroup driver, so switch Docker's cgroup driver to systemd:
    mkdir /etc/docker
    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    
    4.1.2 Enable Docker to start on boot on all nodes
    systemctl daemon-reload && systemctl enable --now docker
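    A quick optional check that the cgroup driver change took effect:
    docker info 2>/dev/null | grep -i 'cgroup driver'
    # expected: Cgroup Driver: systemd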
    

    4.2 Install the Kubernetes packages (all nodes)

    Install the latest kubeadm on all nodes (the first command lists the available versions):
    yum list kubeadm.x86_64 --showduplicates | sort -r
    
    yum install kubeadm -y
    
    The default pause image comes from gcr.io, which may be unreachable from inside China, so configure the kubelet to use the Aliyun pause image instead:
    cat >/etc/sysconfig/kubelet<<EOF
    KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
    EOF
    
    Enable kubelet to start on boot:
    systemctl daemon-reload
    systemctl enable --now kubelet
    

    5. Installing the High-Availability Components (all master nodes)

    (Note: for a non-HA cluster, there is no need to install haproxy and keepalived.)

    5.1 Install HAProxy and Keepalived via yum

    yum install keepalived haproxy -y
    

    5.2 Configure HAProxy

    Configure HAProxy on all master nodes (see the HAProxy documentation for details; the configuration is identical on every master node):
    mkdir /etc/haproxy
    [root@k8s-master01 ~]# cat /etc/haproxy/haproxy.cfg
    global
      maxconn  2000
      ulimit-n  16384
      log  127.0.0.1 local0 err
      stats timeout 30s
    
    defaults
      log global
      mode  http
      option  httplog
      timeout connect 5000
      timeout client  50000
      timeout server  50000
      timeout http-request 15s
      timeout http-keep-alive 15s
    
    frontend monitor-in
      bind *:33305
      mode http
      option httplog
      monitor-uri /monitor
    
    frontend k8s-master
      bind 0.0.0.0:16443
      bind 127.0.0.1:16443
      mode tcp
      option tcplog
      tcp-request inspect-delay 5s
      default_backend k8s-master
    
    backend k8s-master
      mode tcp
      option tcplog
      option tcp-check
      balance roundrobin
      default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
      server k8s-master01   192.168.1.107:6443  check
      server k8s-master02   192.168.1.108:6443  check
      server k8s-master03   192.168.1.109:6443  check
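    Before starting the service, the configuration can be syntax-checked (optional; -c only validates the file):
    haproxy -c -f /etc/haproxy/haproxy.cfg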
    

    5.3 Configure Keepalived

    Note: the IP (mcast_src_ip) and network interface (the interface parameter) differ on each node.
    Master01 configuration:
    mkdir /etc/keepalived
    cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
        script_user root
        enable_script_security
    }
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2
        rise 1
    }
    vrrp_instance VI_1 {
        state MASTER
        interface ens33
        mcast_src_ip 192.168.1.107
        virtual_router_id 51
        priority 101
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.1.236
        }
        track_script {
           chk_apiserver
        }
    }
    
    Master02 configuration:
    mkdir /etc/keepalived
    cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
        script_user root
        enable_script_security
    }
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2
        rise 1
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        mcast_src_ip 192.168.1.108
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.1.236
        }
        track_script {
           chk_apiserver
        }
    }
    
    Master03 configuration:
    mkdir /etc/keepalived
    cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
        script_user root
        enable_script_security
    }
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2
        rise 1
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        mcast_src_ip 192.168.1.109
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.1.236
        }
        track_script {
           chk_apiserver
        }
    }
    

    5.4 Configure the Keepalived health-check script on all master nodes:

    # Quote the heredoc delimiter so the $variables inside the script are written literally instead of being expanded now
    cat > /etc/keepalived/check_apiserver.sh << 'EOF'
    #!/bin/bash
    
    err=0
    for k in $(seq 1 3)
    do
        check_code=$(pgrep haproxy)
        if [[ $check_code == "" ]]; then
            err=$(expr $err + 1)
            sleep 1
            continue
        else
            err=0
            break
        fi
    done
    
    if [[ $err != "0" ]]; then
        echo "systemctl stop keepalived"
        /usr/bin/systemctl stop keepalived
        exit 1
    else
        exit 0
    fi
    EOF
    
    5.4.1 Make the script executable
    chmod +x /etc/keepalived/check_apiserver.sh
    
    5.4.2 Start haproxy and keepalived
    systemctl daemon-reload
    systemctl enable --now haproxy
    systemctl enable --now keepalived
    

    5.5 If keepalived and haproxy are installed, verify that keepalived is working

    # Test the VIP (from any node)
    [root@k8s-node02 ~]# ping 192.168.1.236 -c 4
    PING 192.168.1.236 (192.168.1.236) 56(84) bytes of data.
    64 bytes from 192.168.1.236: icmp_seq=1 ttl=64 time=0.923 ms
    64 bytes from 192.168.1.236: icmp_seq=2 ttl=64 time=0.433 ms
    64 bytes from 192.168.1.236: icmp_seq=3 ttl=64 time=0.554 ms
    64 bytes from 192.168.1.236: icmp_seq=4 ttl=64 time=0.457 ms
    
    --- 192.168.1.236 ping statistics ---
    4 packets transmitted, 4 received, 0% packet loss, time 3061ms
    rtt min/avg/max/mdev = 0.433/0.591/0.923/0.198 ms
    
    # Test haproxy (from any node)
    [root@k8s-node01 ~]# telnet 192.168.1.236 16443
    Trying 192.168.1.236...
    Connected to 192.168.1.236.
    Escape character is '^]'.
    Connection closed by foreign host.
    
    # Troubleshooting
    If the VIP does not answer ping, or telnet never shows the ']' escape prompt, treat the VIP as unusable and do not continue. Troubleshoot keepalived first: firewall and SELinux state, the haproxy and keepalived service status, the listening ports, and so on.
    
    On all nodes the firewall must be disabled and inactive: systemctl status firewalld
    
    On all nodes SELinux must be disabled: getenforce
    
    On the master nodes check the haproxy and keepalived status: systemctl status keepalived haproxy
    
    On the master nodes check the listening ports: netstat -lntp
    

    6. Cluster Initialization

    Official initialization documentation:
    https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/
    

    Master01: (Note: for a non-HA cluster, replace 192.168.1.236:16443 with Master01's address and change 16443 to the apiserver port, which defaults to 6443. Also set kubernetesVersion to the kubeadm version actually installed on your servers; the example below uses v1.20.0.)

    6.1 Create the kubeadm-config.yaml configuration on the Master01 node (on Master01)

    [root@k8s-master01 ~]# cat /root/kubeadm-config.yaml
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: 7t2weq.bjbawausm0jaxury
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 192.168.1.107
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: k8s-master01
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      certSANs:
      - 192.168.1.236
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: 192.168.1.236:16443
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.20.0      # change to match the kubeadm version you installed
    networking:
      dnsDomain: cluster.local
      podSubnet: 172.168.0.0/12     # change if it conflicts with your company network
      serviceSubnet: 10.96.0.0/12   # change if it conflicts with your company network
    scheduler: {}
    
    Migrate the kubeadm config file to the current schema (on Master01)
    # The migrated file is new.yaml
    [root@k8s-master01 ~]# kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml
    

    6.2 Copy the new.yaml file to the other nodes (on Master01)

    scp new.yaml k8s-master02:/root/
    scp new.yaml k8s-master03:/root/
    scp new.yaml k8s-node01:/root/
    scp new.yaml k8s-node02:/root/
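    The same copy can be done in one loop, matching the pattern used earlier for the kernel packages:
    for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp new.yaml $i:/root/ ; done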
    

    6.3 Pull the images (on Master01, 02 and 03)

    [root@k8s-master01 ~]# kubeadm config images pull --config /root/new.yaml 
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.0
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.0
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.0
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.0
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0
    [config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0
    

    6.4 Enable kubelet on boot (all nodes)

    systemctl enable --now kubelet
    

    6.5 Initialize the Master01 node. Initialization generates the certificates and configuration files under /etc/kubernetes; afterwards the other master nodes simply join Master01: (on Master01)

    kubeadm init --config /root/new.yaml  --upload-certs
    
    [addons] Applied essential addon: CoreDNS
    [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [addons] Applied essential addon: kube-proxy
    
    # On success, the output contains token values that the other nodes need in order to join, so record them:
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of the control-plane node running the following command on each as root:
     
      # This is the join command to run on the other two master nodes
      kubeadm join 192.168.1.236:16443 --token 7t2weq.bjbawausm0jaxury \
        --discovery-token-ca-cert-hash sha256:3aa4cf3c52c1956cb86d2911fe0f6b8898bfa43c06966b2f1095e5000a00d1a4 \
        --control-plane --certificate-key b66aa49bb32a780ac5b58841c50a7767a9638a9d313e23c9413218473e527ec2
    
    Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
    As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
    "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    # This is the join command to run on the two worker nodes
    kubeadm join 192.168.1.236:16443 --token 7t2weq.bjbawausm0jaxury \
        --discovery-token-ca-cert-hash sha256:3aa4cf3c52c1956cb86d2911fe0f6b8898bfa43c06966b2f1095e5000a00d1a4 
    

    6.6 If initialization fails, reset and initialize again:

    kubeadm reset -f ; ipvsadm --clear ; rm -rf ~/.kube
    

    6.7 Configure the environment variable on Master01 for accessing the Kubernetes cluster (on Master01)

    cat <<EOF >> /root/.bashrc
    export KUBECONFIG=/etc/kubernetes/admin.conf
    EOF
    source /root/.bashrc
    
    # Check node status:
    [root@k8s-master01 ~]# kubectl get nodes
    NAME           STATUS     ROLES                  AGE     VERSION
    k8s-master01   NotReady   control-plane,master   3m24s   v1.20.0
    

    6.8 With a kubeadm-based install, all system components run as containers in the kube-system namespace. Check the Pod status:

    [root@k8s-master01 ~]# kubectl get pods -n kube-system -o wide
    NAME                                   READY   STATUS    RESTARTS   AGE     IP              NODE           NOMINATED NODE   READINESS GATES
    coredns-54d67798b7-8w5hd               0/1     Pending   0          4m18s   <none>          <none>         <none>           <none>
    coredns-54d67798b7-vb2ll               0/1     Pending   0          4m18s   <none>          <none>         <none>           <none>
    etcd-k8s-master01                      1/1     Running   0          4m19s   192.168.1.107   k8s-master01   <none>           <none>
    kube-apiserver-k8s-master01            1/1     Running   0          4m19s   192.168.1.107   k8s-master01   <none>           <none>
    kube-controller-manager-k8s-master01   1/1     Running   0          4m19s   192.168.1.107   k8s-master01   <none>           <none>
    kube-proxy-5bws8                       1/1     Running   0          4m18s   192.168.1.107   k8s-master01   <none>           <none>
    kube-scheduler-k8s-master01            1/1     Running   0          4m19s   192.168.1.107   k8s-master01   <none>           <none>
    

    7. Highly Available Masters

    7.1 Run on the master02 node

    [root@k8s-master02 ~]# kubeadm join 192.168.1.236:16443 --token 7t2weq.bjbawausm0jaxury \
        --discovery-token-ca-cert-hash sha256:3aa4cf3c52c1956cb86d2911fe0f6b8898bfa43c06966b2f1095e5000a00d1a4 \
        --control-plane --certificate-key b66aa49bb32a780ac5b58841c50a7767a9638a9d313e23c9413218473e527ec2
        
       
        
    # Success output. To use kubectl on this additional master node as well, run the following.
    To start administering your cluster from this node, you need to run the following as a regular user:
    
            mkdir -p $HOME/.kube
            sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
            sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Run 'kubectl get nodes' to see this node join the cluster.
    
    # Check on master01 that the node has joined the cluster
    [root@k8s-master01 ~]# kubectl get nodes
    NAME           STATUS     ROLES                  AGE     VERSION
    k8s-master01   NotReady   control-plane,master   16m     v1.20.0
    k8s-master02   NotReady   control-plane,master   2m21s   v1.20.0
    

    7.2 If the token or certificate key has expired, regenerate them as follows (on master01)

    # Generate a new token after the old one expires:
    [root@k8s-master01 ~]# kubeadm token create --print-join-command
    kubeadm join 192.168.1.236:16443 --token 8k8qzk.d43ed9gfgw1st3xi     --discovery-token-ca-cert-hash sha256:3aa4cf3c52c1956cb86d2911fe0f6b8898bfa43c06966b2f1095e5000a00d1a4 
    
    # The masters also need a new --certificate-key:
    [root@k8s-master01 ~]# kubeadm init phase upload-certs  --upload-certs
    [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
    [upload-certs] Using certificate key:
    43c5695789c0dc4433f480a05683d55887e836b71b452b407138d8dd54cad937
    
    7.2.1 Join the master03 node to the cluster (on master03)
    # Take the command printed by (kubeadm token create --print-join-command), add --control-plane --certificate-key with the key printed by (kubeadm init phase upload-certs --upload-certs), and run the result on master03
    kubeadm join 192.168.1.236:16443 --token 8k8qzk.d43ed9gfgw1st3xi \
       --discovery-token-ca-cert-hash sha256:3aa4cf3c52c1956cb86d2911fe0f6b8898bfa43c06966b2f1095e5000a00d1a4 \
       --control-plane --certificate-key 43c5695789c0dc4433f480a05683d55887e836b71b452b407138d8dd54cad937
    

    7.3 Join the worker nodes to the cluster (on node01 and node02)

    The worker nodes are where the business applications run. In production it is not recommended to schedule anything beyond the system components on the master nodes; in a test environment, allowing Pods on the masters can save resources.

    # Just run the command printed by (kubeadm token create --print-join-command)
    kubeadm join 192.168.1.236:16443 --token 8k8qzk.d43ed9gfgw1st3xi \
       --discovery-token-ca-cert-hash sha256:3aa4cf3c52c1956cb86d2911fe0f6b8898bfa43c06966b2f1095e5000a00d1a4
       
       
    # The following output means the join succeeded
    This node has joined the cluster:
    * Certificate signing request was sent to apiserver and a response was received.
    * The Kubelet was informed of the new secure connection details.
    
    Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
    
    # Check the node list on master01
    [root@k8s-master01 ~]# kubectl get nodes
    NAME           STATUS     ROLES                  AGE    VERSION
    k8s-master01   NotReady   control-plane,master   33m    v1.20.0
    k8s-master02   NotReady   control-plane,master   19m    v1.20.0
    k8s-master03   NotReady   control-plane,master   10m    v1.20.0
    k8s-node01     NotReady   <none>                 116s   v1.20.0
    k8s-node02     NotReady   <none>                 113s   v1.20.0
    

    Common error:

    error execution phase preflight: [preflight] Some fatal errors occurred:
            [ERROR DirAvailable--etc-kubernetes-manifests]: /etc/kubernetes/manifests is not empty
            [ERROR FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
            [ERROR Port-10250]: Port 10250 is in use
            
    # Fix: delete the /etc/kubernetes/manifests directory and the stale kubelet.conf, and stop whatever is listening on port 10250
    # (running kubeadm reset -f on that node, as in 6.6, clears all of these preflight leftovers)
    

    8. Installing Calico (run on master01)

    8.1 Switch to the matching branch

    [root@k8s-master01 ~]# cd /root/k8s-ha-install && git checkout manual-installation-v1.20.x && cd calico/
    Already on 'manual-installation-v1.20.x'
    

    8.2 Modify the following places in calico-etcd.yaml

    sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://192.168.1.107:2379,https://192.168.1.108:2379,https://192.168.1.109:2379"#g' calico-etcd.yaml
    
    ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'`
    ETCD_CERT=`cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'`
    ETCD_KEY=`cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'`
    sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml
    
    sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml
    
    POD_SUBNET=`cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'`
    
    sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.1.0/16"@  value: '"${POD_SUBNET}"'@g' calico-etcd.yaml
    
    [root@k8s-master01 calico]# kubectl apply -f calico-etcd.yaml
    secret/calico-etcd-secrets created
    configmap/calico-config created
    clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrole.rbac.authorization.k8s.io/calico-node created
    clusterrolebinding.rbac.authorization.k8s.io/calico-node created
    daemonset.apps/calico-node created
    serviceaccount/calico-node created
    deployment.apps/calico-kube-controllers created
    serviceaccount/calico-kube-controllers created
    

    Check the container status (this takes a few minutes)

    [root@k8s-master01 dashboard]# kubectl  get po -n kube-system   
    NAME                                       READY   STATUS    RESTARTS   AGE
    calico-kube-controllers-5f6d4b864b-khq4h   1/1     Running   0          13m
    calico-node-5tvxh                          1/1     Running   0          13m
    calico-node-kffn7                          1/1     Running   0          13m
    calico-node-lltfs                          1/1     Running   0          13m
    calico-node-nhgn8                          1/1     Running   0          13m
    coredns-54d67798b7-8w5hd                   1/1     Running   0          117m
    coredns-54d67798b7-vb2ll                   1/1     Running   0          117m
    etcd-k8s-master01                          1/1     Running   0          117m
    etcd-k8s-master02                          1/1     Running   0          104m
    kube-apiserver-k8s-master01                1/1     Running   0          117m
    kube-apiserver-k8s-master02                1/1     Running   0          104m
    kube-controller-manager-k8s-master01       1/1     Running   1          117m
    kube-controller-manager-k8s-master02       1/1     Running   0          104m
    kube-proxy-5bws8                           1/1     Running   0          117m
    kube-proxy-pbqjc                           1/1     Running   0          104m
    kube-proxy-tpwbt                           1/1     Running   0          86m
    kube-proxy-vbpc5                           1/1     Running   0          86m
    kube-scheduler-k8s-master01                1/1     Running   1          117m
    kube-scheduler-k8s-master02                1/1     Running   0          104m
    metrics-server-545b8b99c6-hkgnz            1/1     Running   0          2m38s
    

    9. Deploying Metrics Server

    In recent Kubernetes versions, system resource metrics are collected by metrics-server, which gathers memory, disk, CPU and network usage for nodes and Pods.

    9.1 Copy front-proxy-ca.crt from the Master01 node to all worker nodes (on Master01)

    # Copy to any additional nodes the same way
    scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node01:/etc/kubernetes/pki/front-proxy-ca.crt
    
    scp /etc/kubernetes/pki/front-proxy-ca.crt k8s-node02:/etc/kubernetes/pki/front-proxy-ca.crt
    

    9.2 Install metrics-server

    [root@k8s-master01 calico]# cd /root/k8s-ha-install/metrics-server-0.4.x-kubeadm/
    
    [root@k8s-master01 metrics-server-0.4.x-kubeadm]# kubectl  create -f comp.yaml 
    serviceaccount/metrics-server created
    clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    clusterrole.rbac.authorization.k8s.io/system:metrics-server created
    rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
    clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
    clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
    service/metrics-server created
    deployment.apps/metrics-server created
    apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    

    9.3 Check the status

    [root@k8s-master01 metrics-server-0.4.x-kubeadm]# kubectl  top node
    NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
    k8s-master01   374m         18%    1158Mi          62%       
    k8s-master02   354m         17%    1027Mi          54%
    k8s-master03   104m         2%     1082Mi          28% 
    k8s-node01     188m         9%     773Mi           41%       
    k8s-node02     446m         22%    803Mi           43%
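    Pod-level metrics come from the same API and can be checked as well (optional):
    kubectl top pods --all-namespaces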
    

    10. Deploying the Dashboard

    The Dashboard displays the cluster's resources; it can also be used to view Pod logs in real time and to run commands inside containers.

    10.1 Install the pinned dashboard version

    [root@k8s-master01 metrics-server-0.4.x-kubeadm]# cd /root/k8s-ha-install/dashboard/
    
    [root@k8s-master01 dashboard]# kubectl  create -f .
    serviceaccount/admin-user created
    clusterrolebinding.rbac.authorization.k8s.io/admin-user created
    namespace/kubernetes-dashboard created
    serviceaccount/kubernetes-dashboard created
    service/kubernetes-dashboard created
    secret/kubernetes-dashboard-certs created
    secret/kubernetes-dashboard-csrf created
    secret/kubernetes-dashboard-key-holder created
    configmap/kubernetes-dashboard-settings created
    role.rbac.authorization.k8s.io/kubernetes-dashboard created
    clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
    rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
    clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
    deployment.apps/kubernetes-dashboard created
    service/dashboard-metrics-scraper created
    deployment.apps/dashboard-metrics-scraper created
    
    # Check the status of all containers
    [root@k8s-master01 dashboard]# kubectl get po -A
    NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
    kube-system            calico-kube-controllers-5f6d4b864b-khq4h     1/1     Running   0          16m
    kube-system            calico-node-5tvxh                            1/1     Running   0          16m
    kube-system            calico-node-kffn7                            1/1     Running   0          16m
    kube-system            calico-node-lltfs                            1/1     Running   0          16m
    kube-system            calico-node-nhgn8                            1/1     Running   0          16m
    kube-system            coredns-54d67798b7-8w5hd                     1/1     Running   0          121m
    kube-system            coredns-54d67798b7-vb2ll                     1/1     Running   0          121m
    kube-system            etcd-k8s-master01                            1/1     Running   0          121m
    kube-system            etcd-k8s-master02                            1/1     Running   0          107m
    kube-system            kube-apiserver-k8s-master01                  1/1     Running   0          121m
    kube-system            kube-apiserver-k8s-master02                  1/1     Running   0          107m
    kube-system            kube-controller-manager-k8s-master01         1/1     Running   1          121m
    kube-system            kube-controller-manager-k8s-master02         1/1     Running   0          107m
    kube-system            kube-proxy-5bws8                             1/1     Running   0          121m
    kube-system            kube-proxy-pbqjc                             1/1     Running   0          107m
    kube-system            kube-proxy-tpwbt                             1/1     Running   0          90m
    kube-system            kube-proxy-vbpc5                             1/1     Running   0          90m
    kube-system            kube-scheduler-k8s-master01                  1/1     Running   1          121m
    kube-system            kube-scheduler-k8s-master02                  1/1     Running   0          107m
    kube-system            metrics-server-545b8b99c6-hkgnz              1/1     Running   0          6m25s
    kubernetes-dashboard   dashboard-metrics-scraper-7645f69d8c-hftvd   1/1     Running   0          2m31s
    kubernetes-dashboard   kubernetes-dashboard-78cb679857-zksm9        1/1     Running   0          2m32s
    

    10.2 Change the dashboard Service to NodePort

    # Change the Service type from ClusterIP to NodePort (skip this step if it is already NodePort):
    [root@k8s-master01 dashboard]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
    type: NodePort
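    If you prefer a non-interactive change over kubectl edit, a merge patch does the same thing (a sketch):
    kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'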
    

    10.3 Log in to the dashboard

    # Check the dashboard Service and its mapped NodePort
    [root@k8s-master01 dashboard]# kubectl get svc kubernetes-dashboard  -n  kubernetes-dashboard
    NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
    kubernetes-dashboard   NodePort   10.111.114.182   <none>        443:32209/TCP   10m
    
    # Access it in a browser at VIP:mapped-port
    https://192.168.1.236:32209/
    
    # Get the login token
    [root@k8s-master01 dashboard]# kubectl -n kube-system describe secret 
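    That command dumps every secret in kube-system. Assuming the admin-user ServiceAccount created by the dashboard manifests lives in kube-system (which the command above implies), the token line alone can be pulled out like this (a sketch):
    kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep '^token'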
    

    11. Required Configuration Changes

    11.1 Switch kube-proxy to ipvs mode. The ipvs setting was left commented out when the cluster was initialized, so it has to be changed by hand: (on master01)

    # Check the current proxy mode
    [root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
    iptables
    
    # Edit the kube-proxy ConfigMap
    [root@k8s-master01 ~]# kubectl edit cm kube-proxy -n kube-system
    mode: "ipvs"  # line 44 of the ConfigMap
    
    # Roll the kube-proxy Pods so they pick up the change
    [root@k8s-master01 ~]# kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
    daemonset.apps/kube-proxy patched
    
    # Check the proxy mode again
    [root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
    ipvs
    

    12. Notes

    Note: in a kubeadm-installed cluster the certificates are valid for one year by default. On the master nodes, kube-apiserver, kube-scheduler, kube-controller-manager and etcd all run as containers; they can be seen with kubectl get po -n kube-system.
    
    Differences from a binary installation:
    
    The kubelet configuration files are /etc/sysconfig/kubelet and /var/lib/kubelet/config.yaml.
    
    The manifests of the other components are under /etc/kubernetes/manifests, e.g. kube-apiserver.yaml. When such a yaml file is changed, the kubelet automatically reloads the configuration, i.e. restarts the Pod. Do not create the file a second time.
    

    After a kubeadm install, the master nodes do not allow Pods to be scheduled by default. This can be changed as follows:

    # View the taints:
    [root@k8s-master01 ~]# kubectl describe node -l node-role.kubernetes.io/master= | grep Taints
    Taints:             node-role.kubernetes.io/master:NoSchedule
    Taints:             node-role.kubernetes.io/master:NoSchedule
    Taints:       node-role.kubernetes.io/master:NoSchedule
    
    # Remove the taint:
    
    [root@k8s-master01 ~]# kubectl taint node -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-
    node/k8s-master01 untainted
    node/k8s-master02 untainted
    node/k8s-master03 untainted
    
    [root@k8s-master01 ~]# kubectl describe node -l node-role.kubernetes.io/master= | grep Taints
    Taints:       <none>
    Taints:       <none>
    Taints:       <none>

    Original article: https://www.cnblogs.com/hsyw/p/14162437.html