k8s 1.20.1 Cluster Installation


    1 Environment Preparation

    1.1 Machine Environment

    CPU/memory: each node must have at least 2 CPU cores and at least 2 GB of memory, otherwise k8s will not start.

    DNS: preferably use a DNS server reachable from the local network; otherwise name resolution fails and some images cannot be downloaded.

    Linux kernel: the kernel must be version 4 or higher, so the kernel has to be upgraded.
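
    A quick pre-flight check on each node (a minimal sketch using standard Linux tools; these commands are not part of the original guide):

    nproc      # number of CPU cores, must be >= 2
    free -m    # total memory in MB, should be >= 2048 (about 2G)
    uname -r   # kernel version, should report 4.x after the upgrade in section 1.9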

    hostname    role      IP
    kmaster     master    192.168.8.121
    knode1      node1     192.168.8.122
    knode2      node2     192.168.8.123

    1.2 hostname

    [root@base1 ~]# hostnamectl set-hostname kmaster --static
    [root@base2 ~]# hostnamectl set-hostname knode1 --static
    [root@base3 ~]# hostnamectl set-hostname knode2 --static
    

    1.3 Network Configuration

    [root@base1 ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens33
    BOOTPROTO="static"      # change dhcp to static
    ONBOOT="yes"            # bring this interface up at boot
    IPADDR=192.168.8.121    # static IP (192.168.8.122 / 192.168.8.123 on the other nodes)
    GATEWAY=192.168.8.2     # default gateway
    NETMASK=255.255.255.0   # subnet mask
    DNS1=114.114.114.114    # DNS server
    DNS2=8.8.8.8            # DNS server
    
    reboot
    

    1.4 Check the hostname

    hostname
    

    1.5 Configure the IP/hostname mapping

    vi /etc/hosts
    192.168.8.121 kmaster
    192.168.8.122 knode1
    192.168.8.123 knode2
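
    As an optional sanity check (not part of the original steps), confirm that every node can resolve and reach the others by hostname:

    for h in kmaster knode1 knode2; do ping -c 1 $h; done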
    

    1.6 Install dependencies (note: these packages must be installed on every node)

    yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git iproute lrzsz bash-completion tree bridge-utils unzip bind-utils gcc
    

    1.7 Install and start iptables, enable it at boot, flush its rules, and save the empty rule set as the default

    # stop and disable the firewall
    systemctl stop firewalld && systemctl disable firewalld
    # install iptables-services, start and enable it, flush all rules, and save the empty rule set as the default
    yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
    

    1.8 Disable swap and SELinux

    # turn off the swap partition (virtual memory) now and disable it permanently in /etc/fstab
    swapoff -a && sed -ri '/ swap / s/^(.*)$/#\1/g' /etc/fstab
    # disable SELinux (permissive immediately, disabled after the next reboot)
    setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
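
    Optionally, verify that both changes took effect (assuming SELinux was previously enforcing):

    free -m       # the Swap line should now show 0
    getenforce    # prints Permissive now, Disabled after the next reboot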
    

    1.9 Upgrade the Linux kernel to 4.4.x

    rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
    # install the long-term-support kernel from the elrepo-kernel repository
    yum --enablerepo=elrepo-kernel install -y kernel-lt
    # make the new kernel the default boot entry
    grub2-set-default 'CentOS Linux (4.4.248-1.el7.elrepo.x86_64) 7 (Core)'
    
    reboot
    # note: the new kernel only takes effect after the server is rebooted
    # check the running kernel
    uname -r
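
    The exact menu entry name depends on which kernel version elrepo installs at the time, so the string passed to grub2-set-default may differ from the one above. A common way on CentOS 7 to list the available entries first (the config may live at /boot/grub2/grub.cfg on some setups):

    awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg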
    

    2 Install k8s

    2.1 Tune kernel parameters for k8s (kubernetes.conf)

    cat > kubernetes.conf <<EOF
    net.bridge.bridge-nf-call-iptables=1
    net.bridge.bridge-nf-call-ip6tables=1
    net.ipv4.ip_forward=1
    net.ipv4.tcp_tw_recycle=0
    vm.swappiness=0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    fs.inotify.max_user_instances=8192
    fs.inotify.max_user_watches=1048576
    fs.file-max=52706963
    fs.nr_open=52706963
    net.ipv6.conf.all.disable_ipv6=1
    net.netfilter.nf_conntrack_max=2310720
    EOF
    # copy the tuning file into /etc/sysctl.d/ so it is loaded automatically at boot
    cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
    # reload manually so the settings take effect immediately
    sysctl -p /etc/sysctl.d/kubernetes.conf
    sysctl: cannot stat /proc/sys/net/netfilter/nf_conntrack_max: No such file or directory
    

    Fixing the error:

    lsmod |grep conntrack
    modprobe ip_conntrack
    lsmod |grep conntrack
    nf_conntrack_ipv4      20480  0
    nf_defrag_ipv4         16384  1 nf_conntrack_ipv4
    nf_conntrack          114688  1 nf_conntrack_ipv4
    
    sysctl -p /etc/sysctl.d/kubernetes.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.ipv4.ip_forward = 1
    net.ipv4.tcp_tw_recycle = 0
    vm.swappiness = 0
    vm.overcommit_memory = 1
    vm.panic_on_oom = 0
    fs.inotify.max_user_instances = 8192
    fs.inotify.max_user_watches = 1048576
    fs.file-max = 52706963
    fs.nr_open = 52706963
    net.ipv6.conf.all.disable_ipv6 = 1
    net.netfilter.nf_conntrack_max = 2310720
    

    2.2 Set the system timezone

    # set the system timezone to China/Shanghai
    timedatectl set-timezone Asia/Shanghai
    # write the current UTC time to the hardware clock
    timedatectl set-local-rtc 0
    # restart services that depend on the system time
    systemctl restart rsyslog
    systemctl restart crond
    

    2.3 Stop services the cluster does not need

    systemctl stop postfix && systemctl disable postfix
    

    2.4 Configure log persistence

    2.4.1 Create the directory for persistent logs

    mkdir /var/log/journal
    

    2.4.2 Create the drop-in configuration directory

    mkdir /etc/systemd/journald.conf.d
    

    2.4.3 Create the configuration file

    cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
    [Journal]
    Storage=persistent
    Compress=yes
    SyncIntervalSec=5m
    RateLimitInterval=30s
    RateLimitBurst=1000
    SystemMaxUse=10G
    SystemMaxFileSize=200M
    MaxRetentionSec=2week
    ForwardToSyslog=no
    EOF
    

    2.4.4 Restart systemd-journald to apply the configuration

    systemctl restart systemd-journald
    

    2.4.5 Raise the open-file limit (optional; may be skipped)

    echo "* soft nofile 65536" >> /etc/security/limits.conf
    echo "* hard nofile 65536" >> /etc/security/limits.conf
    

    2.4.6 Prerequisites for enabling ipvs in kube-proxy

    modprobe br_netfilter
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    # use lsmod to check that the modules have been loaded
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
    ip_vs_sh               16384  0
    ip_vs_wrr              16384  0
    ip_vs_rr               16384  0
    ip_vs                 147456  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
    nf_conntrack_ipv4      20480  0
    nf_defrag_ipv4         16384  1 nf_conntrack_ipv4
    nf_conntrack          114688  2 ip_vs,nf_conntrack_ipv4
    libcrc32c              16384  2 xfs,ip_vs
    

    3 Deploy docker

    3.1 Install docker

    yum install -y yum-utils device-mapper-persistent-data lvm2
    
    # add the stable repository; the repo configuration is saved to /etc/yum.repos.d/docker-ce.repo
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    # update the yum package index and install Docker CE
    yum update -y && yum install -y docker-ce
    

    3.2 Configure the docker daemon

    # create the /etc/docker directory
    mkdir /etc/docker
    # write the daemon.json file
    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "log-driver": "json-file",
      "log-opts": {
        "max-size": "100m"
      }
    }
    EOF
    # note: watch the file encoding; if docker fails to start, journalctl -amu docker shows the error
    # create the directory for docker systemd drop-in files
    mkdir -p /etc/systemd/system/docker.service.d
    

    3.3 Restart the docker service

    systemctl daemon-reload && systemctl restart docker && systemctl enable docker
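
    After the restart, it is worth confirming that docker picked up the systemd cgroup driver configured in daemon.json (an optional check):

    docker info 2>/dev/null | grep -i 'cgroup driver'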
    

    4 kubeadm (one-command k8s installation)

    4.1 yum repository mirrors

    China mirror (Aliyun)

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    

    Official repository (Google)

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
    EOF
    

    4.2 Install kubeadm, kubelet, and kubectl (1.20.1)

    yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
    # enable kubelet at boot and start it
    systemctl enable kubelet && systemctl start kubelet
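
    The command above installs the newest version available in the repository. To match the 1.20.1 release used throughout this guide, the packages can instead be pinned explicitly (assuming the repository carries the 1.20.1-0 builds, which is the usual package naming in these repos):

    yum install -y kubelet-1.20.1-0 kubeadm-1.20.1-0 kubectl-1.20.1-0 --disableexcludes=kubernetes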
    

    5 Prepare the k8s images

    5.1 Pull the images online

    Generate the default kubeadm.conf file

    kubeadm config print init-defaults > kubeadm.conf
    

    Edit kubeadm.conf and change the Kubernetes version to v1.20.1.
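    For example, the version field can be changed non-interactively (a small sketch, assuming the generated file contains a kubernetesVersion line; editing it in vi works just as well):

    sed -i 's/^kubernetesVersion: .*/kubernetesVersion: v1.20.1/' kubeadm.conf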

    Download the images

    kubeadm config images pull --config kubeadm.conf
    [config/images] Pulled k8s.gcr.io/kube-apiserver:v1.20.1
    [config/images] Pulled k8s.gcr.io/kube-controller-manager:v1.20.1
    [config/images] Pulled k8s.gcr.io/kube-scheduler:v1.20.1
    [config/images] Pulled k8s.gcr.io/kube-proxy:v1.20.1
    [config/images] Pulled k8s.gcr.io/pause:3.2
    [config/images] Pulled k8s.gcr.io/etcd:3.4.13-0
    [config/images] Pulled k8s.gcr.io/coredns:1.7.0
    
    docker images
    k8s.gcr.io/kube-proxy                v1.20.1    e3f6fcd87756   11 days ago     118MB
    k8s.gcr.io/kube-apiserver            v1.20.1    75c7f7112080   11 days ago     122MB
    k8s.gcr.io/kube-controller-manager   v1.20.1    2893d78e47dc   11 days ago     116MB
    k8s.gcr.io/kube-scheduler            v1.20.1    4aa0b4397bbb   11 days ago     46.4MB
    k8s.gcr.io/etcd                      3.4.13-0   0369cf4303ff   4 months ago    253MB
    k8s.gcr.io/coredns                   1.7.0      bfe3a36ebd25   6 months ago    45.2MB
    k8s.gcr.io/pause                     3.2        80d28bedfe5d   10 months ago   683kB
    

    Save the images

    mkdir kubeadm-basic.images
    cd kubeadm-basic.images
    docker save k8s.gcr.io/kube-apiserver:v1.20.1 > apiserver.tar
    docker save k8s.gcr.io/coredns:1.7.0 > coredns.tar
    docker save k8s.gcr.io/etcd:3.4.13-0 > etcd.tar
    docker save k8s.gcr.io/kube-controller-manager:v1.20.1 > kubec-con-man.tar
    docker save k8s.gcr.io/pause:3.2 > pause.tar
    docker save k8s.gcr.io/kube-proxy:v1.20.1 > proxy.tar
    docker save k8s.gcr.io/kube-scheduler:v1.20.1 > scheduler.tar
    
    cd ..
    tar zcvf kubeadm-basic.images.tar.gz kubeadm-basic.images
    

    5.2 Offline images

    kubeadm-basic.images.tar.gz

    Upload the image tarball and import the images it contains into the local docker image store

    [root@kmaster ~]# ll
    total 216676
    -rw-------. 1 root root      1391 Dec 22 04:42 anaconda-ks.cfg
    drwxr-xr-x  2 root root       142 Dec 30 07:55 kubeadm-basic.images
    -rw-r--r--  1 root root 221857746 Dec 30 08:01 kubeadm-basic.images.tar.gz
    -rw-r--r--  1 root root       827 Dec 30 07:34 kubeadm.conf
    -rw-r--r--  1 root root        20 Dec 30 07:00 kube-images.tar.gz
    -rw-r--r--  1 root root       364 Dec 30 03:40 kubernetes.conf
    [root@kmaster ~]# ll kubeadm-basic.images
    total 692188
    -rw-r--r-- 1 root root 122923520 Dec 30 07:54 apiserver.tar
    -rw-r--r-- 1 root root  45364736 Dec 30 07:54 coredns.tar
    -rw-r--r-- 1 root root 254677504 Dec 30 07:54 etcd.tar
    -rw-r--r-- 1 root root 117107200 Dec 30 07:54 kubec-con-man.tar
    -rw-r--r-- 1 root root    691712 Dec 30 07:55 pause.tar
    -rw-r--r-- 1 root root 120377856 Dec 30 07:55 proxy.tar
    -rw-r--r-- 1 root root  47643136 Dec 30 07:55 scheduler.tar
    

    Write a script that imports the image tarballs into the local docker image store:

    # when kubeadm initializes the cluster it downloads the corresponding images from Google (k8s.gcr.io); the images are fairly large and slow to download
    #1 image-import script (create the shell script image-load.sh in any directory)
    #! /bin/bash
    # note: adjust the path to wherever the images were extracted
    ls /root/kubeadm-basic.images > /tmp/images-list.txt
    cd /root/kubeadm-basic.images
    for i in $(cat /tmp/images-list.txt)
    do
        docker load -i $i
    done
    rm -rf /tmp/images-list.txt
    
    #2 make the script executable
    chmod 755 image-load.sh
    
    #3 run it to import the images
    ./image-load.sh
    
    #4 copy the script and the images to the other nodes
    # copy to knode1
    scp -r image-load.sh kubeadm-basic.images root@knode1:/root/
    # copy to knode2
    scp -r image-load.sh kubeadm-basic.images root@knode2:/root/
    

    5.3 Import the images on the worker nodes

    Import the images on knode1

    [root@knode1 ~]# ./image-load.sh
    Loaded image: k8s.gcr.io/kube-apiserver:v1.20.1
    Loaded image: k8s.gcr.io/coredns:1.7.0
    Loaded image: k8s.gcr.io/etcd:3.4.13-0
    Loaded image: k8s.gcr.io/kube-controller-manager:v1.20.1
    Loaded image: k8s.gcr.io/pause:3.2
    Loaded image: k8s.gcr.io/kube-proxy:v1.20.1
    Loaded image: k8s.gcr.io/kube-scheduler:v1.20.1
    [root@knode1 ~]# docker images
    REPOSITORY                           TAG        IMAGE ID       CREATED         SIZE
    k8s.gcr.io/kube-proxy                v1.20.1    e3f6fcd87756   11 days ago     118MB
    k8s.gcr.io/kube-apiserver            v1.20.1    75c7f7112080   11 days ago     122MB
    k8s.gcr.io/kube-controller-manager   v1.20.1    2893d78e47dc   11 days ago     116MB
    k8s.gcr.io/kube-scheduler            v1.20.1    4aa0b4397bbb   11 days ago     46.4MB
    k8s.gcr.io/etcd                      3.4.13-0   0369cf4303ff   4 months ago    253MB
    k8s.gcr.io/coredns                   1.7.0      bfe3a36ebd25   6 months ago    45.2MB
    k8s.gcr.io/pause                     3.2        80d28bedfe5d   10 months ago   683kB
    

    Import the images on knode2

    [root@knode2 ~]# ./image-load.sh
    Loaded image: k8s.gcr.io/kube-apiserver:v1.20.1
    Loaded image: k8s.gcr.io/coredns:1.7.0
    Loaded image: k8s.gcr.io/etcd:3.4.13-0
    Loaded image: k8s.gcr.io/kube-controller-manager:v1.20.1
    Loaded image: k8s.gcr.io/pause:3.2
    Loaded image: k8s.gcr.io/kube-proxy:v1.20.1
    Loaded image: k8s.gcr.io/kube-scheduler:v1.20.1
    [root@knode2 ~]# docker images
    REPOSITORY                           TAG        IMAGE ID       CREATED         SIZE
    k8s.gcr.io/kube-proxy                v1.20.1    e3f6fcd87756   11 days ago     118MB
    k8s.gcr.io/kube-apiserver            v1.20.1    75c7f7112080   11 days ago     122MB
    k8s.gcr.io/kube-controller-manager   v1.20.1    2893d78e47dc   11 days ago     116MB
    k8s.gcr.io/kube-scheduler            v1.20.1    4aa0b4397bbb   11 days ago     46.4MB
    k8s.gcr.io/etcd                      3.4.13-0   0369cf4303ff   4 months ago    253MB
    k8s.gcr.io/coredns                   1.7.0      bfe3a36ebd25   6 months ago    45.2MB
    k8s.gcr.io/pause                     3.2        80d28bedfe5d   10 months ago   683kB
    

    6 Deploy k8s

    # initialize the control plane -- run only on the master node
    
    #1 generate the yaml configuration file
    kubeadm config print init-defaults > kubeadm-config.yaml
    
    #2 edit the yaml configuration
    localAPIEndpoint:
        advertiseAddress: 192.168.8.121 # note: change to the master node's IP address
    kubernetesVersion: v1.20.1 # note: set the version number; it must be consistent with the installed kubectl version
    networking:
        dnsDomain: cluster.local
        # pod network CIDR for flannel; this subnet must match the flannel network
        podSubnet: "10.244.0.0/16"
        serviceSubnet: "10.96.0.0/12"
    # append a document that tells kube-proxy to use ipvs
    ---
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: kubeProxyConfiguration
    featureGates:
        SupportIPVSProxyMode: true
    mode: ipvs
    
    #3 initialize the master node and start the deployment
    kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
    # note: this command requires more than 1 CPU core, otherwise it fails
    W1230 09:44:35.116411    1495 strict.go:47] unknown configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta2", Kind:"kubeProxyConfiguration"} for scheme definitions in "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme/scheme.go:31" and "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs/scheme.go:28"
    [config] WARNING: Ignored YAML document with GroupVersionKind kubeadm.k8s.io/v1beta2, Kind=kubeProxyConfiguration
    [init] Using Kubernetes version: v1.20.1
    [preflight] Running pre-flight checks
            [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.1. Latest validated version: 19.03
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [kmaster kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.8.121]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [kmaster localhost] and IPs [192.168.8.121 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [kmaster localhost] and IPs [192.168.8.121 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 8.503909 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
    [upload-certs] Using certificate key:
    7ecfa579dfa66c0ea9c87146aa5130c1692b85a4d16cfc860473064a75c113c5
    [mark-control-plane] Marking the node kmaster as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
    [mark-control-plane] Marking the node kmaster as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: abcdef.0123456789abcdef
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.8.121:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:7459fa01464531734d3eee182461b77b043d31eff7df2233635654d7c199c947
    [root@kmaster ~]#
    

    Reference kubeadm-config.yaml

    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 192.168.8.121
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: kmaster
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: k8s.gcr.io
    kind: ClusterConfiguration
    kubernetesVersion: v1.20.1
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    ---
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: kubeProxyConfiguration
    featureGates:
      SupportIPVSProxyMode: true
    mode: ipvs
    
    

    Following the instructions printed by kubeadm, run the following commands:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    export KUBECONFIG=/etc/kubernetes/admin.conf
    

    Before running the commands:

    kubectl get node
    The connection to the server localhost:8080 was refused - did you specify the right host or port?
    

    After running the commands:

    kubectl get node
    NAME      STATUS     ROLES                  AGE     VERSION
    kmaster   NotReady   control-plane,master   7m24s   v1.20.1
    

    The node information can now be queried successfully, but the node status is NotReady rather than Ready.

    The reason is that we are using ipvs + flannel for pod networking, but the flannel network plugin has not been deployed yet, so the node remains NotReady.
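
    To see the cause directly, inspect the node conditions and the kube-system pods before deploying flannel (an optional check; the coredns pods typically stay Pending until a CNI plugin is running):

    kubectl describe node kmaster | grep -A 3 'Ready'
    kubectl get pod -n kube-system -o wide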

    7 The flannel plugin

    # deploy the flannel network plugin -- run only on the master node
    #1 download the flannel manifest
    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    #2 deploy flannel
    kubectl create -f kube-flannel.yml
    # alternatively, apply the manifest directly from the URL
    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    

    Verification

    [root@kmaster ~]# kubectl get pod -n kube-system
    NAME                              READY   STATUS    RESTARTS   AGE
    coredns-74ff55c5b-5n6zs           1/1     Running   0          15m
    coredns-74ff55c5b-r9469           1/1     Running   0          15m
    etcd-kmaster                      1/1     Running   0          15m
    kube-apiserver-kmaster            1/1     Running   0          15m
    kube-controller-manager-kmaster   1/1     Running   0          15m
    kube-flannel-ds-n4sbp             1/1     Running   0          89s
    kube-proxy-t7bvn                  1/1     Running   0          15m
    kube-scheduler-kmaster            1/1     Running   0          15m
    

    8 Join the worker nodes

    # to add the worker nodes to the cluster, run the join command from the install log on each of them
    # check the log file
    cat kubeadm-init.log
    # copy the command below to each worker node and run it there
    kubeadm join 192.168.8.121:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:7459fa01464531734d3eee182461b77b043d31eff7df2233635654d7c199c947
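
    The bootstrap token from the install log is only valid for 24 hours by default; if it has expired by the time a node is added, a fresh join command can be generated on the master with kubeadm's standard token subcommand:

    kubeadm token create --print-join-command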
    

    knode1

    kubeadm join 192.168.8.121:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:7459fa01464531734d3eee182461b77b043d31eff7df2233635654d7c199c947
    

    knode2

    kubeadm join 192.168.8.121:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:7459fa01464531734d3eee182461b77b043d31eff7df2233635654d7c199c947
    

    9 Verify cluster status

    [root@kmaster ~]# kubectl get node
    NAME      STATUS   ROLES                  AGE     VERSION
    kmaster   Ready    control-plane,master   26m     v1.20.1
    knode1    Ready    <none>                 5m37s   v1.20.1
    knode2    Ready    <none>                 5m28s   v1.20.1
    
    [root@kmaster ~]# kubectl get pod -n kube-system -o wide
    NAME                              READY   STATUS    RESTARTS   AGE     IP              NODE      NOMINATED NODE   READINESS GATES
    coredns-74ff55c5b-5n6zs           1/1     Running   0          27m     10.244.0.2      kmaster   <none>           <none>
    coredns-74ff55c5b-r9469           1/1     Running   0          27m     10.244.0.3      kmaster   <none>           <none>
    etcd-kmaster                      1/1     Running   0          27m     192.168.8.121   kmaster   <none>           <none>
    kube-apiserver-kmaster            1/1     Running   0          27m     192.168.8.121   kmaster   <none>           <none>
    kube-controller-manager-kmaster   1/1     Running   0          27m     192.168.8.121   kmaster   <none>           <none>
    kube-flannel-ds-9td5g             1/1     Running   0          7m12s   192.168.8.122   knode1    <none>           <none>
    kube-flannel-ds-n4sbp             1/1     Running   0          13m     192.168.8.121   kmaster   <none>           <none>
    kube-flannel-ds-rvfbt             1/1     Running   0          7m3s    192.168.8.123   knode2    <none>           <none>
    kube-proxy-knhtb                  1/1     Running   0          7m12s   192.168.8.122   knode1    <none>           <none>
    kube-proxy-t7bvn                  1/1     Running   0          27m     192.168.8.121   kmaster   <none>           <none>
    kube-proxy-vpxqm                  1/1     Running   0          7m3s    192.168.8.123   knode2    <none>           <none>
    kube-scheduler-kmaster            1/1     Running   0          27m     192.168.8.121   kmaster   <none>           <none>
    

    10 Check the docker and k8s versions

    [root@kmaster ~]# docker version
    Client: Docker Engine - Community
     Version:           20.10.1
     API version:       1.41
     Go version:        go1.13.15
     Git commit:        831ebea
     Built:             Tue Dec 15 04:37:17 2020
     OS/Arch:           linux/amd64
     Context:           default
     Experimental:      true
    
    Server: Docker Engine - Community
     Engine:
      Version:          20.10.1
      API version:      1.41 (minimum version 1.12)
      Go version:       go1.13.15
      Git commit:       f001486
      Built:            Tue Dec 15 04:35:42 2020
      OS/Arch:          linux/amd64
      Experimental:     false
     containerd:
      Version:          1.4.3
      GitCommit:        269548fa27e0089a8b8278fc4fc781d7f65a939b
     runc:
      Version:          1.0.0-rc92
      GitCommit:        ff819c7e9184c13b7c2607fe6c30ae19403a7aff
     docker-init:
      Version:          0.19.0
      GitCommit:        de40ad0
    
    [root@kmaster ~]# kubectl version
    Client Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.1", GitCommit:"c4d752765b3bbac2237bf87cf0b1c2e307844666", GitTreeState:"clean", BuildDate:"2020-12-18T12:09:25Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}
    Server Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.1", GitCommit:"c4d752765b3bbac2237bf87cf0b1c2e307844666", GitTreeState:"clean", BuildDate:"2020-12-18T12:00:47Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/amd64"}
    
    

    Note:

    Docker version 20.10.1, built with Go 1.13.15;
    k8s version 1.20.1, built with Go 1.15.5.

    We only need to put in the effort and leave the rest to time.