• Installing a highly available (HA) Kubernetes cluster


    1. System initialization

    Check the current firewall status:

    systemctl status firewalld

    systemctl status iptables

    1.1 Set the system hostname and configure mutual resolution in the hosts file

    hostnamectl set-hostname k8s-master01    # run on each node with its own name (k8s-master02, k8s-master03, ...)
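    The hosts entries themselves are not reproduced in the original notes. A minimal sketch, appended on every node; only 10.0.110.171 (master01), 10.0.110.173 (master03) and the VIP 10.0.110.100 appear elsewhere in this document, so the k8s-master02 address is an assumption:

    cat >> /etc/hosts <<EOF
    10.0.110.171 k8s-master01
    10.0.110.172 k8s-master02   # assumed address
    10.0.110.173 k8s-master03
    EOF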

    Install dependency packages

    yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

    Switch the firewall to iptables and flush the rules

    systemctl  stop firewalld && systemctl disable firewalld

    yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

    Disable swap and SELinux

    swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

    setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
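    A quick way to confirm both changes took effect (not part of the original notes):

    free -h | grep -i swap    # swap total should show 0B
    getenforce                # Permissive now; Disabled after the next reboot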

    Tune kernel parameters for Kubernetes

    cat > kubernetes.conf <<EOF
    net.bridge.bridge-nf-call-iptables=1
    net.bridge.bridge-nf-call-ip6tables=1
    net.ipv4.ip_forward=1
    net.ipv4.tcp_tw_recycle=0
    vm.swappiness=0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    fs.inotify.max_user_instances=8192
    fs.inotify.max_user_watches=1048576
    fs.file-max=52706963
    fs.nr_open=52706963
    net.ipv6.conf.all.disable_ipv6=1
    net.netfilter.nf_conntrack_max=2310720
    EOF

    cp kubernetes.conf /etc/sysctl.d/kubernetes.conf

    sysctl -p /etc/sysctl.d/kubernetes.conf

    Set the system time zone

    # Set the system time zone to Asia/Shanghai
    timedatectl set-timezone Asia/Shanghai
    # Keep the hardware clock (RTC) in UTC
    timedatectl set-local-rtc 0
    # Restart services that depend on the system time
    systemctl restart rsyslog
    systemctl restart crond

    Stop services the system does not need

    systemctl stop postfix && systemctl disable postfix

    Configure rsyslogd and systemd journald

    mkdir /var/log/journal # directory for persistent log storage
    mkdir /etc/systemd/journald.conf.d
    cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
    [Journal]
    # Persist logs to disk
    Storage=persistent
    
    # Compress historical logs
    Compress=yes
    
    SyncIntervalSec=5m
    RateLimitInterval=30s
    RateLimitBurst=1000
    
    # Maximum disk usage: 10G
    SystemMaxUse=10G
    
    # Maximum size of a single log file: 200M
    SystemMaxFileSize=200M
    
    # Keep logs for 2 weeks
    MaxRetentionSec=2week
    
    # Do not forward logs to syslog
    ForwardToSyslog=no
    EOF
    systemctl restart systemd-journald
    systemctl restart systemd-journald

    Upgrade the system kernel to 4.4

    The 3.10.x kernel that ships with CentOS 7.x has bugs that make Docker and Kubernetes unstable, so upgrade to the 4.4 LTS kernel from ELRepo:

    rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
    # After installation, check that the menuentry for the new kernel in /boot/grub2/grub.cfg contains an initrd16 line; if not, install again!
    yum --enablerepo=elrepo-kernel install -y kernel-lt
    # Boot from the new kernel by default
    grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
    # After rebooting, install the matching kernel headers and source files
    yum --enablerepo=elrepo-kernel install kernel-lt-devel-$(uname -r) kernel-lt-headers-$(uname -r)
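    A quick check after the reboot (not in the original notes) to confirm the node is actually running the new kernel before continuing:

    uname -r    # should print 4.4.x-1.el7.elrepo.x86_64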

    Disable NUMA

    cp /etc/default/grub{,.bak}
    vim /etc/default/grub # add the `numa=off` parameter to the GRUB_CMDLINE_LINUX line, as shown below:
    diff /etc/default/grub.bak /etc/default/grub
    6c6
    < GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet"
    ---
    > GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet numa=off"
    cp /boot/grub2/grub.cfg{,.bak}
    grub2-mkconfig -o /boot/grub2/grub.cfg

    Prerequisites for enabling IPVS mode in kube-proxy

    modprobe br_netfilter
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules
    bash /etc/sysconfig/modules/ipvs.modules 
    lsmod | grep -e ip_vs -e nf_conntrack_ipv4

    Install Docker

    yum install -y yum-utils device-mapper-persistent-data lvm2
    
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
    yum update -y && yum install -y docker-ce
    
    ## Create the /etc/docker directory
    mkdir /etc/docker
    
    # Configure the Docker daemon (daemon.json)
    
    cat > /etc/docker/daemon.json <<EOF
    {
       "exec-opts": ["native.cgroupdriver=systemd"],
       "log-driver": "json-file",
       "log-opts": {
          "max-size": "100m"
      }
    }
    EOF
    
    mkdir -p /etc/systemd/system/docker.service.d
    
    # Reload systemd, restart Docker, and enable it on boot
    systemctl daemon-reload && systemctl restart docker && systemctl enable docker
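    A quick check (not in the original notes) that Docker picked up the systemd cgroup driver configured above:

    docker info --format '{{.CgroupDriver}}'    # should print: systemd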

    Create a directory for the Kubernetes image files

    [root@k8s-master01 ~]# mkdir /usr/local/kubernetes
    [root@k8s-master01 ~]# cd !$
    cd /usr/local/kubernetes
    [root@k8s-master01 kubernetes]# mkdir install
    [root@k8s-master01 kubernetes]# cd install/
    [root@k8s-master01 install]# 

    Use Wise2C's (睿云) HAProxy and Keepalived images to provide high availability; start the HAProxy and Keepalived containers on the master nodes.

    [root@k8s-master01 install]# ll
    总用量 1007276
    -rw-r--r-- 1 root root  45365760 6月   1 10:33 coredns-1.7.0.tar.gz
    -rw-r--r-- 1 root root 254629888 6月   1 10:33 etcd-3.4.9-1.tar.gz
    -rw-r--r-- 1 root root  74437120 6月   1 10:36 haproxy.tar
    -rw-r--r-- 1 root root  16337408 6月   1 10:36 keepalived.tar
    -rw-r--r-- 1 root root 241260752 6月   1 10:36 kubeadm-basic.images.tar.gz
    -rw-r--r-- 1 root root 120040960 6月   1 10:33 kube-apiserver-v1.19.0.tar.gz
    -rw-r--r-- 1 root root 112045568 6月   1 10:34 kube-controller-manager-v1.19.0.tar.gz
    -rw-r--r-- 1 root root 119695360 6月   1 10:34 kube-proxy-v1.19.0.tar.gz
    -rw-r--r-- 1 root root  46919168 6月   1 10:34 kube-scheduler-v1.19.0.tar.gz
    -rw-r--r-- 1 root root       201 6月   1 10:36 load-images.sh
    -rw-r--r-- 1 root root    692736 6月   1 10:34 pause-3.2.tar.gz
    -rw-r--r-- 1 root root      1480 6月   1 10:36 start.keep.tar.gz   # startup scripts for HAProxy and Keepalived
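    load-images.sh itself (201 bytes in the listing above) is not reproduced in these notes. A minimal sketch of what such a loader script typically does, assuming it simply feeds every archive in the current directory to docker load:

    #!/bin/bash
    # load every image tarball in this directory into the local Docker image store
    for f in *.tar *.tar.gz; do
        docker load -i "$f"
    done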

    Run load-images.sh to import the images. The docker-import error at the end of the output below is expected: start.keep.tar.gz contains startup scripts rather than an image archive, so docker load fails on it.

    [root@k8s-master01 install]# ./load-images.sh 
    225df95e717c: Loading layer [==================================================>]  336.4kB/336.4kB
    96d17b0b58a7: Loading layer [==================================================>]  45.02MB/45.02MB
    Loaded image: k8s.gcr.io/coredns:1.7.0
    0d1435bd79e4: Loading layer [==================================================>]  3.062MB/3.062MB
    2aef7a73d4b0: Loading layer [==================================================>]   2.13MB/2.13MB
    ec3830e15d9c: Loading layer [==================================================>]  225.3MB/225.3MB
    4d5d7883c216: Loading layer [==================================================>]   2.19MB/2.19MB
    5d3a32005e6b: Loading layer [==================================================>]  21.95MB/21.95MB
    Loaded image: k8s.gcr.io/etcd:3.4.9-1
    d626a8ad97a1: Loading layer [==================================================>]  58.46MB/58.46MB
    0f1be9219d57: Loading layer [==================================================>]  5.561MB/5.561MB
    98ad91eed212: Loading layer [==================================================>]  10.38MB/10.38MB
    13591054b0ff: Loading layer [==================================================>]  2.048kB/2.048kB
    2cf20655563d: Loading layer [==================================================>]  4.608kB/4.608kB
    bbfd0b97d06a: Loading layer [==================================================>]   2.56kB/2.56kB
    Loaded image: wise2c/haproxy-k8s:latest
    cd7100a72410: Loading layer [==================================================>]  4.403MB/4.403MB
    2d57ca229959: Loading layer [==================================================>]   11.9MB/11.9MB
    e0fcdac3c7fa: Loading layer [==================================================>]  6.144kB/6.144kB
    fb812c031d86: Loading layer [==================================================>]  3.584kB/3.584kB
    Loaded image: wise2c/keepalived-k8s:latest
    79d541cda6cb: Loading layer [==================================================>]  3.041MB/3.041MB
    e9933a1f21f5: Loading layer [==================================================>]  1.734MB/1.734MB
    c3a6120d2fd6: Loading layer [==================================================>]  115.2MB/115.2MB
    Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
    d85a13cfa53e: Loading layer [==================================================>]  107.3MB/107.3MB
    Loaded image: k8s.gcr.io/kube-controller-manager:v1.19.0
    91e3a07063b3: Loading layer [==================================================>]  53.89MB/53.89MB
    b4e54f331697: Loading layer [==================================================>]  21.78MB/21.78MB
    b9b82a97c787: Loading layer [==================================================>]  5.168MB/5.168MB
    1b55846906e8: Loading layer [==================================================>]  4.608kB/4.608kB
    061bfb5cb861: Loading layer [==================================================>]  8.192kB/8.192kB
    78dd6c0504a7: Loading layer [==================================================>]  8.704kB/8.704kB
    f83925edb29c: Loading layer [==================================================>]  38.81MB/38.81MB
    Loaded image: k8s.gcr.io/kube-proxy:v1.19.0
    a2a6ea4dde58: Loading layer [==================================================>]  42.13MB/42.13MB
    Loaded image: k8s.gcr.io/kube-scheduler:v1.19.0
    ba0dae6243cc: Loading layer [==================================================>]  684.5kB/684.5kB
    Loaded image: k8s.gcr.io/pause:3.2
    open /var/lib/docker/tmp/docker-import-947428489/data/json: no such file or directory
    [root@k8s-master01 install]# docker images
    REPOSITORY                           TAG       IMAGE ID       CREATED         SIZE
    k8s.gcr.io/kube-proxy                v1.19.0   bc9c328f379c   9 months ago    118MB
    k8s.gcr.io/kube-apiserver            v1.19.0   1b74e93ece2f   9 months ago    119MB
    k8s.gcr.io/kube-controller-manager   v1.19.0   09d665d529d0   9 months ago    111MB
    k8s.gcr.io/kube-scheduler            v1.19.0   cbdc8369d8b1   9 months ago    45.7MB
    k8s.gcr.io/etcd                      3.4.9-1   d4ca8726196c   11 months ago   253MB
    k8s.gcr.io/coredns                   1.7.0     bfe3a36ebd25   11 months ago   45.2MB
    k8s.gcr.io/pause                     3.2       80d28bedfe5d   15 months ago   683kB
    wise2c/keepalived-k8s                latest    0ba6a7862982   2 years ago     14MB
    wise2c/haproxy-k8s                   latest    fde31577093d   3 years ago     71.1MB
    [root@k8s-master01 install]# 
    [root@k8s-master01 install]# tar -zxvf start.keep.tar.gz 
    data/
    data/lb/
    data/lb/start-keepalived.sh
    data/lb/kubeadm-config.yaml
    data/lb/etc/
    data/lb/etc/haproxy.cfg
    data/lb/start-haproxy.sh
    [root@k8s-master01 install]# mv data/ /    # move the data directory to the filesystem root
    [root@k8s-master01 install]# cd /data/
    [root@k8s-master01 data]# 
    [root@k8s-master01 data]# cd lb/
    [root@k8s-master01 lb]# ll
    总用量 12
    drwxr-xr-x 2 root root  25 8月   5 2019 etc
    -rw-r--r-- 1 root root 832 8月   5 2019 kubeadm-config.yaml
    -rwxr-xr-x 1 root root 401 8月   5 2019 start-haproxy.sh
    -rwxr-xr-x 1 root root 486 8月   5 2019 start-keepalived.sh
    [root@k8s-master01 lb]# 
    [root@k8s-master01 lb]# vim etc/haproxy.cfg 
    [root@k8s-master01 lb]# vim start-haproxy.sh 
    [root@k8s-master01 lb]# ./start-haproxy.sh 
    0ad4095cd2617d0b0f3470ebec7be4703d9b2e74ced458be20febd94de57434e
    [root@k8s-master01 lb]# netstat -anpt|grep 6444
    tcp        0      0 0.0.0.0:6444            0.0.0.0:*               LISTEN      15081/docker-proxy  
    tcp6       0      0 :::6444                 :::*                    LISTEN      15086/docker-proxy  
    [root@k8s-master01 lb]# 
    
    Set the address entries in the configuration file to the addresses of the three master nodes; a sketch of the relevant haproxy.cfg section follows below.
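    The contents of /data/lb/etc/haproxy.cfg are not reproduced in the original notes. A minimal sketch of the relevant fragment, assuming a plain TCP pass-through from the frontend port 6444 (the port seen in the netstat check above) to the three kube-apiservers; the server names and the k8s-master02 address are assumptions, not the author's exact file:

    frontend kube-apiserver
        bind *:6444
        mode tcp
        default_backend kube-masters

    backend kube-masters
        mode tcp
        balance roundrobin
        server k8s-master01 10.0.110.171:6443 check
        server k8s-master02 10.0.110.172:6443 check   # assumed address
        server k8s-master03 10.0.110.173:6443 check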
    [root@k8s-master01 lb]# cat start-keepalived.sh 
    #!/bin/bash
    VIRTUAL_IP=10.0.110.100
    INTERFACE=ens160
    NETMASK_BIT=24
    CHECK_PORT=6444
    RID=10
    VRID=160
    MCAST_GROUP=224.0.0.18
    
    docker run -itd --restart=always --name=Keepalived-K8S \
            --net=host --cap-add=NET_ADMIN \
            -e VIRTUAL_IP=$VIRTUAL_IP \
            -e INTERFACE=$INTERFACE \
            -e CHECK_PORT=$CHECK_PORT \
            -e RID=$RID \
            -e VRID=$VRID \
            -e NETMASK_BIT=$NETMASK_BIT \
            -e MCAST_GROUP=$MCAST_GROUP \
            wise2c/keepalived-k8s
    [root@k8s-master01 lb]# 
    
    Modify the VIP address and the network interface name to match your environment, then start Keepalived.

    [root@k8s-master01 lb]# ./start-keepalived.sh 
    83c857f2e1a97be46184198fed727bcd264b852bca23256f22dd91c77e5aec74
    [root@k8s-master01 lb]# ip addr show
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
    2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
        link/ether 00:0c:29:9f:be:80 brd ff:ff:ff:ff:ff:ff
        inet 10.0.110.171/24 brd 10.0.110.255 scope global noprefixroute ens160
           valid_lft forever preferred_lft forever
        inet 10.0.110.100/24 scope global secondary ens160        # the VIP is up
           valid_lft forever preferred_lft forever
    3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
        link/ether 02:42:76:86:a6:72 brd ff:ff:ff:ff:ff:ff
        inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
           valid_lft forever preferred_lft forever
    5: veth1fe44fa@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
        link/ether 56:75:d7:21:b3:0b brd ff:ff:ff:ff:ff:ff link-netnsid 0
    [root@k8s-master01 lb]# 

    Install kubeadm (on master and worker nodes)

    # Configure the Aliyun Kubernetes yum repository
    cat > /etc/yum.repos.d/kubernetes.repo <<EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    

    yum -y install kubeadm-1.19.0 kubectl-1.19.0 kubelet-1.19.0

    
    # Install kubeadm/kubelet/kubectl with yum (same install, written with a version variable) and enable kubelet
    mkdir ~/kubeadm/kubelet && cd ~/kubeadm/kubelet
    
    RELEASE=1.19.0
    
    yum -y install kubeadm-${RELEASE} kubelet-${RELEASE} kubectl-${RELEASE} && systemctl enable --now kubelet

    Set up the first master node

    kubeadm config print init-defaults --component-configs KubeProxyConfiguration --component-configs KubeletConfiguration  > kubeadm.yaml
    
    [root@k8s-master01 yaml]# cat kubeadm.yaml 
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.0.110.171
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: k8s-master01
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: "10.0.110.100:6444"
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
    ...(the rest of the generated kubeadm.yaml is omitted here)
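    The generated file must be edited before use. A sketch of the fields that typically need attention for this setup; kubernetesVersion and controlPlaneEndpoint match values seen elsewhere in these notes, while the podSubnet value is an assumption chosen to match flannel's default network, not taken from the author's file:

    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: v1.19.0
    controlPlaneEndpoint: "10.0.110.100:6444"   # Keepalived VIP + HAProxy port
    networking:
      podSubnet: "10.244.0.0/16"                # assumed; must match the flannel Network
      serviceSubnet: "10.96.0.0/12"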
    [root@k8s-master01 yaml]# kubeadm init --config=kubeadm.yaml --upload-certs | tee kubeadm-init.log
    W0601 14:38:40.957139   25160 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    [init] Using Kubernetes version: v1.19.0
    [preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.6. Latest validated version: 19.03
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.110.171 10.0.110.100]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [10.0.110.171 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [10.0.110.171 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [kubelet-check] Initial timeout of 40s passed.
    [apiclient] All control plane components are healthy after 53.835690 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
    [upload-certs] Using certificate key:
    d10887cf12587c861e56bce4045242789cd8e67ad262912fc8ef1b567318b2ef
    [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
    [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: abcdef.0123456789abcdef
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [addons] Applied essential addon: kube-proxy
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of the control-plane node running the following command on each as root:
    
      kubeadm join 10.0.110.100:6444 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:854ae036c009b57ca3deb93dee8d9e524096f95dae883240c6345a1347dadf1c \
        --control-plane --certificate-key d10887cf12587c861e56bce4045242789cd8e67ad262912fc8ef1b567318b2ef
    
    Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
    As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
    "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 10.0.110.100:6444 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:854ae036c009b57ca3deb93dee8d9e524096f95dae883240c6345a1347dadf1c
    [root@k8s-master01 yaml]# 
    [root@k8s-master01 yaml]# vim /root/.kube/config 
    [root@k8s-master01 yaml]# kubectl get node
    NAME           STATUS     ROLES    AGE     VERSION
    k8s-master01   NotReady   master   4m41s   v1.19.0
    [root@k8s-master01 yaml]# 

    On the remaining nodes (here the other master nodes), install the same packages: yum -y install kubeadm-1.19.0 kubectl-1.19.0 kubelet-1.19.0

    Then run the control-plane join command printed by kubeadm init:

    kubeadm join 10.0.110.100:6444 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:854ae036c009b57ca3deb93dee8d9e524096f95dae883240c6345a1347dadf1c \
        --control-plane --certificate-key d10887cf12587c861e56bce4045242789cd8e67ad262912fc8ef1b567318b2ef
    [root@k8s-master02 ~]# mkdir -p $HOME/.kube
    [root@k8s-master02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@k8s-master02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
    [root@k8s-master02 ~]# kubectl get node
    NAME           STATUS     ROLES    AGE    VERSION
    k8s-master01   NotReady   master   48m    v1.19.0
    k8s-master02   NotReady   master   2m6s   v1.19.0
    k8s-master03   NotReady   master   116s   v1.19.0
    [root@k8s-master02 ~]# 
    [root@k8s-master01 ~]# kubectl get pod -n kube-system
    NAME                                   READY   STATUS    RESTARTS   AGE
    coredns-f9fd979d6-gzfcs                0/1     Pending   0          49m
    coredns-f9fd979d6-t2wmc                0/1     Pending   0          49m
    etcd-k8s-master01                      1/1     Running   0          49m
    etcd-k8s-master02                      1/1     Running   0          2m47s
    etcd-k8s-master03                      1/1     Running   0          2m25s
    kube-apiserver-k8s-master01            1/1     Running   0          49m
    kube-apiserver-k8s-master02            1/1     Running   0          2m39s
    kube-apiserver-k8s-master03            1/1     Running   1          3m27s
    kube-controller-manager-k8s-master01   1/1     Running   1          49m
    kube-controller-manager-k8s-master02   1/1     Running   0          2m46s
    kube-controller-manager-k8s-master03   1/1     Running   0          2m31s
    kube-proxy-66ddd                       1/1     Running   0          49m
    kube-proxy-pr4z6                       1/1     Running   0          4m10s
    kube-proxy-ws2wh                       1/1     Running   0          3m9s
    kube-scheduler-k8s-master01            1/1     Running   1          49m
    kube-scheduler-k8s-master02            1/1     Running   0          2m29s
    kube-scheduler-k8s-master03            1/1     Running   0          2m24s
    After all three masters have joined, update /data/lb/etc/haproxy.cfg so that its backend lists all three API servers (as sketched earlier), then recreate the HAProxy container so it picks up the change:

    [root@k8s-master01 etc]# vim haproxy.cfg 
    [root@k8s-master01 etc]# pwd
    /data/lb/etc
    [root@k8s-master01 etc]# 

    [root@k8s-master01 etc]# docker ps -a
    CONTAINER ID   IMAGE                   COMMAND                  CREATED          STATUS                       PORTS                                       NAMES
    99ecef87ff96   09d665d529d0            "kube-controller-man…"   8 minutes ago    Up 8 minutes                                                             k8s_kube-controller-manager_kube-controller-manager-k8s-master01_kube-system_9cc77fe9d985555691b0387f795c0314_1
    e1b446101ac5   cbdc8369d8b1            "kube-scheduler --au…"   8 minutes ago    Up 8 minutes                                                             k8s_kube-scheduler_kube-scheduler-k8s-master01_kube-system_5146743ebb284c11f03dc85146799d8b_1
    78f4fd11d7e6   bc9c328f379c            "/usr/local/bin/kube…"   54 minutes ago   Up 54 minutes                                                            k8s_kube-proxy_kube-proxy-66ddd_kube-system_56a95ad6-bad6-4827-9e77-26cac1945621_0
    e7287d019489   k8s.gcr.io/pause:3.2    "/pause"                 54 minutes ago   Up 54 minutes                                                            k8s_POD_kube-proxy-66ddd_kube-system_56a95ad6-bad6-4827-9e77-26cac1945621_0
    22001746bf4a   09d665d529d0            "kube-controller-man…"   55 minutes ago   Exited (255) 8 minutes ago                                               k8s_kube-controller-manager_kube-controller-manager-k8s-master01_kube-system_9cc77fe9d985555691b0387f795c0314_0
    2cc4f97290bb   cbdc8369d8b1            "kube-scheduler --au…"   55 minutes ago   Exited (255) 8 minutes ago                                               k8s_kube-scheduler_kube-scheduler-k8s-master01_kube-system_5146743ebb284c11f03dc85146799d8b_0
    fe1d6e7184a9   d4ca8726196c            "etcd --advertise-cl…"   55 minutes ago   Up 55 minutes                                                            k8s_etcd_etcd-k8s-master01_kube-system_07c527d8e97bbe65c2b37e6f46230834_0
    d545522573c8   1b74e93ece2f            "kube-apiserver --ad…"   55 minutes ago   Up 55 minutes                                                            k8s_kube-apiserver_kube-apiserver-k8s-master01_kube-system_0b9d125e1199ca40379196506a5076e0_0
    94bdebc9dc91   k8s.gcr.io/pause:3.2    "/pause"                 55 minutes ago   Up 55 minutes                                                            k8s_POD_kube-scheduler-k8s-master01_kube-system_5146743ebb284c11f03dc85146799d8b_0
    a7a264408c8a   k8s.gcr.io/pause:3.2    "/pause"                 55 minutes ago   Up 55 minutes                                                            k8s_POD_kube-controller-manager-k8s-master01_kube-system_9cc77fe9d985555691b0387f795c0314_0
    5001d630076d   k8s.gcr.io/pause:3.2    "/pause"                 55 minutes ago   Up 55 minutes                                                            k8s_POD_kube-apiserver-k8s-master01_kube-system_0b9d125e1199ca40379196506a5076e0_0
    3e6967dd6611   k8s.gcr.io/pause:3.2    "/pause"                 55 minutes ago   Up 55 minutes                                                            k8s_POD_etcd-k8s-master01_kube-system_07c527d8e97bbe65c2b37e6f46230834_0
    83c857f2e1a9   wise2c/keepalived-k8s   "/usr/bin/keepalived…"   3 hours ago      Up 3 hours                                                               Keepalived-K8S
    0ad4095cd261   wise2c/haproxy-k8s      "/docker-entrypoint.…"   3 hours ago      Up 3 hours                   0.0.0.0:6444->6444/tcp, :::6444->6444/tcp   HAProxy-K8S
    [root@k8s-master01 etc]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh 
    HAProxy-K8S
    df925c20f698b8dd90aa0a6808b0b9515b7b90705d541322fd05eba46585cba7
    [root@k8s-master01 etc]# 

    Install the flannel network plugin

    [root@k8s-master01 etc]# cd /usr/local/kubernetes/yaml/
    [root@k8s-master01 yaml]# ll
    总用量 12
    -rw-r--r-- 1 root root 6087 6月   1 14:39 kubeadm-init.log
    -rw-r--r-- 1 root root 2659 6月   1 14:38 kubeadm.yaml
    [root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    --2021-06-01 15:41:36--  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    正在解析主机 raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.108.133, 185.199.109.133, ...
    正在连接 raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... 已连接。
    已发出 HTTP 请求,正在等待回应... 200 OK
    长度:4813 (4.7K) [text/plain]
    正在保存至: “kube-flannel.yml”
    
    100%[================================================================================================================================================================>] 4,813       24.9KB/s 用时 0.2s   
    
    2021-06-01 15:41:38 (24.9 KB/s) - 已保存 “kube-flannel.yml” [4813/4813])
    
    [root@k8s-master01 yaml]# ll
    总用量 20
    -rw-r--r-- 1 root root 6087 6月   1 14:39 kubeadm-init.log
    -rw-r--r-- 1 root root 2659 6月   1 14:38 kubeadm.yaml
    -rw-r--r-- 1 root root 4813 6月   1 15:41 kube-flannel.yml
    [root@k8s-master01 yaml]# vim kube-flannel.yml 
    [root@k8s-master01 yaml]# kubectl apply -f kube-flannel.yml 
    podsecuritypolicy.policy/psp.flannel.unprivileged created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.apps/kube-flannel-ds created
    [root@k8s-master01 yaml]# kubectl get pod -n kube-system
    NAME                                   READY   STATUS     RESTARTS   AGE
    coredns-f9fd979d6-gzfcs                0/1     Pending    0          66m
    coredns-f9fd979d6-t2wmc                0/1     Pending    0          66m
    etcd-k8s-master01                      1/1     Running    0          66m
    etcd-k8s-master02                      1/1     Running    0          19m
    etcd-k8s-master03                      1/1     Running    0          19m
    kube-apiserver-k8s-master01            1/1     Running    0          66m
    kube-apiserver-k8s-master02            1/1     Running    0          19m
    kube-apiserver-k8s-master03            1/1     Running    1          20m
    kube-controller-manager-k8s-master01   1/1     Running    1          66m
    kube-controller-manager-k8s-master02   1/1     Running    0          19m
    kube-controller-manager-k8s-master03   1/1     Running    1          19m
    kube-flannel-ds-g65qv                  0/1     Init:0/1   0          31s
    kube-flannel-ds-gq26x                  0/1     Init:0/1   0          31s
    kube-flannel-ds-ktsgp                  0/1     Init:0/1   0          31s
    kube-proxy-66ddd                       1/1     Running    0          66m
    kube-proxy-pr4z6                       1/1     Running    0          21m
    kube-proxy-ws2wh                       1/1     Running    0          20m
    kube-scheduler-k8s-master01            1/1     Running    1          66m
    kube-scheduler-k8s-master02            1/1     Running    0          19m
    kube-scheduler-k8s-master03            1/1     Running    0          19m
    [root@k8s-master01 yaml]# 
    [root@k8s-master01 yaml]# kubectl get pod -n kube-system
    NAME                                   READY   STATUS    RESTARTS   AGE
    coredns-f9fd979d6-gzfcs                1/1     Running   0          69m
    coredns-f9fd979d6-t2wmc                1/1     Running   0          69m
    etcd-k8s-master01                      1/1     Running   0          69m
    etcd-k8s-master02                      1/1     Running   0          22m
    etcd-k8s-master03                      1/1     Running   0          21m
    kube-apiserver-k8s-master01            1/1     Running   0          69m
    kube-apiserver-k8s-master02            1/1     Running   0          22m
    kube-apiserver-k8s-master03            1/1     Running   1          22m
    kube-controller-manager-k8s-master01   1/1     Running   1          69m
    kube-controller-manager-k8s-master02   1/1     Running   1          22m
    kube-controller-manager-k8s-master03   1/1     Running   1          22m
    kube-flannel-ds-g65qv                  1/1     Running   0          3m8s
    kube-flannel-ds-gq26x                  1/1     Running   0          3m8s
    kube-flannel-ds-ktsgp                  1/1     Running   0          3m8s
    kube-proxy-66ddd                       1/1     Running   0          69m
    kube-proxy-pr4z6                       1/1     Running   0          23m
    kube-proxy-ws2wh                       1/1     Running   0          22m
    kube-scheduler-k8s-master01            1/1     Running   1          69m
    kube-scheduler-k8s-master02            1/1     Running   0          22m
    kube-scheduler-k8s-master03            1/1     Running   0          21m
    [root@k8s-master01 yaml]# 
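    The vim edit of kube-flannel.yml above is not detailed in the original notes; the part that usually needs checking is the net-conf.json in the kube-flannel-cfg ConfigMap, whose Network must match the cluster's pod CIDR. The stock flannel values are sketched here as an assumption, not as the author's edited file:

      net-conf.json: |
        {
          "Network": "10.244.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }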

    Modify the API server address in the kubeconfig

    The server field in /root/.kube/config can point at the node's own API server so that kubectl on that master does not depend on the HAProxy/Keepalived VIP; on k8s-master03 it looks like this:

    [root@k8s-master03 ~]# cat .kube/config 
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1EWXdNVEEyTXpnME0xb1hEVE14TURVek1EQTJNemcwTTFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTGpjCnR0NnhoU2tpN2JoQXlXTFdsRVRwdlEyRXpSVVF1dFhvWHZKTDRJaTlwaVBsNVBFRmVjbmRJbjFscU1UUy9XUFQKTHkrYkc4MDladWI5YUtaSFBPc2UvbUhUdVVjRzZ2WDJvS3prSk9WZHYzbytkSmNrcHhXeUU3ZXFOZW5ldXR6UQpZdmg3cFo4WElGTjFMQS9pVUdKQ200bk5pL1BvNnZBTXJydHRzVHdlbDVRMFNCc0ZlelVwVTBYSkdaRmwxOWIxCjNGdTkwaWN1bGJEREZLSzduL2xFSzl1QUNVQk5ZRFVoRENQOG1DOUhiUHBYL3RxZkVSMkF5eXBwdzNwd3hNVi8KaWszVmZvYjEvMTgwRmtoL1NrOGZrN2FQZXVJc2UxQnFrMjBxWGFJMk9TUUJEazJQbGRFVm01bjU4am9yNWNYZgovb3AvYWU0OVlDUlJFcWU4SzhrQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFcE1Ub1M0T1BCVjZxNFZLa1p3VmhTTUoxaFdNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCN09oZWFJNE1XblVUSCt6dUhPQkhZY05kL2xGVDNJc0hCdThtcFBiMlZMaVkzSklUbwpsZlhpN1NIZ0pzdDh4TTBDODVOTW9KNjIvQnJYTzlVSll5dlJRQjlUNFQvQWsvWjRDVG0raWVCenFzUVprOWZXCmRmMTNlWnZ0aERoQlYxS1JRWURKT1N5ZW5qbmpadjVoQ1NvelNVUzRSY21SRUJWSVNRQit6RGVHR2hNMGczUmgKV1pNVUdkMnVaUjlOaG9HaHZ5VjE0WE5zbFdxUklPQTlHT2Y3bXNLVnNFbHVoL1lBcm0yRURHaVhJdnpXM0RUQgpoYkVORzdLSkFOMGd0RFdNU2V0NWp2WG56QmxMcHMzT0JUQkNpaEsrUEczYWNXVkkzSEdRMVl0clkzSVNBc2w5CkpPUzA4alNtL0c0WUVxR0tjMzdFTVlrYzVuTUliajZqN1JPdQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        server: https://10.0.110.173:6443
      name: kubernetes

    Check the etcd cluster and control-plane status

    The control-plane.alpha.kubernetes.io/leader annotation on the kube-controller-manager and kube-scheduler endpoints shows which master currently holds each leader lease:

    [root@k8s-master01 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml
    apiVersion: v1
    kind: Endpoints
    metadata:
      annotations:
        control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_09f20c1b-9099-46dc-a9a3-928a04b7f3ef","leaseDurationSeconds":15,"acquireTime":"2021-06-01T07:47:49Z","renewTime":"2021-06-01T07:55:51Z","leaderTransitions":3}'
      creationTimestamp: "2021-06-01T06:39:34Z"
      managedFields:
      - apiVersion: v1
        fieldsType: FieldsV1
        fieldsV1:
          f:metadata:
            f:annotations:
              .: {}
              f:control-plane.alpha.kubernetes.io/leader: {}
        manager: kube-controller-manager
        operation: Update
        time: "2021-06-01T07:55:51Z"
      name: kube-controller-manager
      namespace: kube-system
      resourceVersion: "10365"
      selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
      uid: d9c50ccd-f319-48ab-8339-c8005035153c
    [root@k8s-master01 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system  -o yaml
    apiVersion: v1
    kind: Endpoints
    metadata:
      annotations:
        control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master03_95f86e21-1f0b-4e9d-a4c7-428184959b8a","leaseDurationSeconds":15,"acquireTime":"2021-06-01T07:26:18Z","renewTime":"2021-06-01T07:57:16Z","leaderTransitions":1}'
      creationTimestamp: "2021-06-01T06:39:35Z"
      managedFields:
      - apiVersion: v1
        fieldsType: FieldsV1
        fieldsV1:
          f:metadata:
            f:annotations:
              .: {}
              f:control-plane.alpha.kubernetes.io/leader: {}
        manager: kube-scheduler
        operation: Update
        time: "2021-06-01T07:57:16Z"
      name: kube-scheduler
      namespace: kube-system
      resourceVersion: "10566"
      selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
      uid: 93c686d4-172b-48cc-9997-3a424bbc1fa6
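    To check the etcd cluster itself, a sketch (not in the original notes) that runs etcdctl inside one of the etcd static pods, using the certificates kubeadm mounts into the container:

    kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
        --endpoints=https://127.0.0.1:2379 \
        --cacert=/etc/kubernetes/pki/etcd/ca.crt \
        --cert=/etc/kubernetes/pki/etcd/server.crt \
        --key=/etc/kubernetes/pki/etcd/server.key \
        member list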
  • 原文地址:https://www.cnblogs.com/tian880820/p/14832418.html