• Kubernetes Binary High-Availability Installation


    Basic Environment Configuration

    k8s GitHub
    The basic environment configuration is the same as for the kubeadm installation.

    Basic Component Installation

    For production, use a Kubernetes release whose patch version is greater than 5; for example, only 1.19.5 and later are suitable for production.

    Containerd as the Runtime

    Install containerd and docker-ce 20.10 on all nodes

    yum -y install docker-ce-20.10.* docker-ce-cli-20.10.* containerd.io
    

    Configure the kernel modules required by containerd (all nodes)

    cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
    overlay
    br_netfilter
    EOF
    

    Load the modules on all nodes

    modprobe -- overlay
    modprobe -- br_netfilter
    
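    Verify that both modules are loaded:

    lsmod | grep -E 'overlay|br_netfilter'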

    Configure the kernel parameters required by containerd on all nodes

    cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
    net.bridge.bridge-nf-call-iptables  = 1
    net.ipv4.ip_forward                 = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    

    Apply the sysctl settings on all nodes

    sysctl --system
    

    Create the containerd configuration file on all nodes

    [root@k8s-master01 ~]# cd /etc/containerd/
    [root@k8s-master01 containerd]# containerd config default | tee /etc/containerd/config.toml # regenerate a default config
    [root@k8s-master01 containerd]# vim config.toml
    Change the following setting from false to true:
    125             SystemdCgroup = true
    On all nodes, change sandbox_image to a pause image address that matches your setup, e.g. registry.cn-hangzhou.aliyuncs.com/google_containers/pause:
     61     sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"
    
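    If you prefer not to edit config.toml by hand, the same two changes can be scripted; a minimal sketch, assuming the default layout produced by containerd config default above:

    sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
    sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml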

    Start containerd on all nodes and enable it at boot

    systemctl daemon-reload
    systemctl enable --now containerd
    

    Configure the crictl client's runtime endpoint on all nodes

    cat > /etc/crictl.yaml <<EOF
    runtime-endpoint: unix:///run/containerd/containerd.sock
    image-endpoint: unix:///run/containerd/containerd.sock
    timeout: 10
    debug: false
    EOF
    
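    A quick sanity check that crictl can reach the running containerd:

    crictl version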

    Docker as the Runtime

    Choose either containerd or Docker as the runtime; only one is needed.

    Install docker-ce 20.10 on all nodes

    yum install docker-ce-20.10.* docker-ce-cli-20.10.* -y
    

    Newer kubelet versions recommend systemd, so change Docker's cgroup driver to systemd as well

    mkdir /etc/docker
    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    

    Enable Docker to start at boot on all nodes

    systemctl daemon-reload && systemctl enable --now docker
    
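    Verify that Docker is now using the systemd cgroup driver:

    docker info | grep "Cgroup Driver"
    # Cgroup Driver: systemd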

    Downloading the Installation Packages

    Kubernetes deployment package downloads (netdisk extraction code: 4xnf)
    All software is downloaded to master01 and then copied to the other nodes.

    kubernetes

    Kubernetes installation package

    At the time of writing, the latest release, 1.25, is still in alpha and unstable, and 1.24 is only at patch release 2. For production, choose a release with a patch version greater than 5; currently 1.23.8 is a suitable choice.

    wget https://dl.k8s.io/v1.23.8/kubernetes-server-linux-amd64.tar.gz
    

    etcd

    etcd installation package

    wget https://github.com/etcd-io/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz
    

    Installing Kubernetes & etcd

    [root@k8s-master01 ~]# tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
    [root@k8s-master01 ~]# tar -zxvf etcd-v3.5.4-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.4-linux-amd64/etcd{,ctl}
    etcd-v3.5.4-linux-amd64/etcdctl
    etcd-v3.5.4-linux-amd64/etcd
    

    Check the versions

    [root@k8s-master01 ~]# kubelet --version
    Kubernetes v1.23.8
    [root@k8s-master01 ~]# etcdctl version
    etcdctl version: 3.5.4
    API version: 3.5
    

    Copy the binaries to the other nodes
    Send to the other master nodes

    [root@k8s-master01 ~]# for i in $(ls /usr/local/bin/kube*); do scp_file.sh master $i; done
    [root@k8s-master01 ~]# for i in $(ls /usr/local/bin/etcd*); do scp_file.sh master $i; done
    

    Send to the worker nodes

    [root@k8s-master01 ~]# for i in $(ls /usr/local/bin/kube* | grep -E 'let|proxy'); do scp_file.sh node $i; done
    
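    Note that scp_file.sh used throughout this guide is a site-local helper script, not a standard tool. A minimal sketch of what it might look like, assuming the hostnames used in this guide and passwordless SSH between nodes:

    #!/bin/bash
    # scp_file.sh <group> <file>: copy <file> to the same path on a group of nodes.
    # The host groups below are assumptions matching this guide's naming; adjust to your environment.
    group=$1; file=$2
    case $group in
      master) hosts="k8s-master02 k8s-master03" ;;
      node)   hosts="k8s-node01 k8s-node02 k8s-node03" ;;
      all)    hosts="k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03" ;;
      *)      echo "usage: $0 {master|node|all} <file>"; exit 1 ;;
    esac
    for h in $hosts; do
      scp -rp "$file" "$h:$file"    # -p preserves the file mode, -r handles directories
    done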

    Check on a master node

    [root@k8s-master02 ~]# ls /usr/local/bin/kube*
    /usr/local/bin/kube-apiserver           /usr/local/bin/kubelet
    /usr/local/bin/kube-controller-manager  /usr/local/bin/kube-proxy
    /usr/local/bin/kubectl                  /usr/local/bin/kube-scheduler
    

    Check on a worker node

    [root@k8s-node01 ~]# ls /usr/local/bin/kube*
    /usr/local/bin/kubelet  /usr/local/bin/kube-proxy
    

    Create the CNI binary directory on all nodes

    mkdir -p /opt/cni/bin
    

    Generating Certificates

    This is the most critical step of the binary installation.

    Download the certificate tools on Master01 (if the download fails, get them from the netdisk)

    [root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
    [root@k8s-master01 ~]# wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
    [root@k8s-master01 ~]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
    
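    A quick check that the tools are usable:

    cfssl version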

    etcd Certificates

    Create the etcd certificate directory on all master nodes

    mkdir -p /etc/etcd/ssl 
    

    Create the Kubernetes directories on all nodes

    mkdir -p /etc/kubernetes/pki /etc/etcd/ssl
    

    Download the configuration files

    cd /root/ ; git clone git@github.com:Chuyio/kubernetes.git
    

    Generate the etcd certificates on the Master01 node.
    The CSR files are certificate signing requests; they configure domains, organization, and unit information.

    In everything that follows, remember to adjust the IP addresses for your environment; this reminder will not be repeated.

    [root@k8s-master01 ~]# cd ~/kubernetes/manual-installation-v1.23.x/pki/
    # Generate the etcd CA certificate and its key
    [root@k8s-master01 pki]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
    2022/06/28 15:30:31 [INFO] generating a new CA key and certificate from CSR
    2022/06/28 15:30:31 [INFO] generate received request
    2022/06/28 15:30:31 [INFO] received CSR
    2022/06/28 15:30:31 [INFO] generating key: rsa-2048
    2022/06/28 15:30:31 [INFO] encoded CSR
    2022/06/28 15:30:31 [INFO] signed certificate with serial number 516546769148812249601164115806107067562283372715
    
    [root@k8s-master01 pki]# cfssl gencert \
    >    -ca=/etc/etcd/ssl/etcd-ca.pem \
    >    -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
    >    -config=ca-config.json \
    >    -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.10.10,192.168.10.20,192.168.10.30 \
    >    -profile=kubernetes \
    >    etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
    2022/06/28 15:30:52 [INFO] generate received request
    2022/06/28 15:30:52 [INFO] received CSR
    2022/06/28 15:30:52 [INFO] generating key: rsa-2048
    2022/06/28 15:30:53 [INFO] encoded CSR
    2022/06/28 15:30:53 [INFO] signed certificate with serial number 121874240601574272764147220473812916769101778419
    

    Copy the certificates to the other master nodes

    [root@k8s-master01 ~]# for i in {etcd-ca-key.pem,etcd-ca.pem,etcd-key.pem,etcd.pem}; do scp_file.sh master /etc/etcd/ssl/$i; done
    

    Kubernetes Component Certificates

    Generate the Kubernetes certificates on Master01

    [root@k8s-master01 ~]# cd ~/kubernetes/manual-installation-v1.23.x/pki/
    [root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
    2022/06/28 15:40:16 [INFO] generating a new CA key and certificate from CSR
    2022/06/28 15:40:16 [INFO] generate received request
    2022/06/28 15:40:16 [INFO] received CSR
    2022/06/28 15:40:16 [INFO] generating key: rsa-2048
    2022/06/28 15:40:16 [INFO] encoded CSR
    2022/06/28 15:40:16 [INFO] signed certificate with serial number 68502869182185278843504976082509561758920717853
    

    10.96.0.0/16 is the Kubernetes Service CIDR; if you need to change the Service CIDR, change 10.96.0.1 here accordingly.
    If this is not a highly available cluster, replace 192.168.10.100 with Master01's IP.

    [root@k8s-master01 pki]# cfssl gencert   -ca=/etc/kubernetes/pki/ca.pem   -ca-key=/etc/kubernetes/pki/ca-key.pem   -config=ca-config.json   -hostname=10.96.0.1,192.168.10.100,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.10.10,192.168.10.20,192.168.10.30   -profile=kubernetes   apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
    2022/06/28 15:43:29 [INFO] generate received request
    2022/06/28 15:43:29 [INFO] received CSR
    2022/06/28 15:43:29 [INFO] generating key: rsa-2048
    2022/06/28 15:43:29 [INFO] encoded CSR
    2022/06/28 15:43:29 [INFO] signed certificate with serial number 222491809967277452754241583318945469757147217922
    

    Generate the apiserver aggregation (front-proxy) certificates.

    These correspond to the apiserver's requestheader-client-ca-file and requestheader-allowed-names (aggregator) settings.

    [root@k8s-master01 pki]# cfssl gencert   -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca 
    2022/06/28 15:44:21 [INFO] generating a new CA key and certificate from CSR
    2022/06/28 15:44:21 [INFO] generate received request
    2022/06/28 15:44:21 [INFO] received CSR
    2022/06/28 15:44:21 [INFO] generating key: rsa-2048
    2022/06/28 15:44:21 [INFO] encoded CSR
    2022/06/28 15:44:21 [INFO] signed certificate with serial number 396951408590266773563761783265307251096359501499
    [root@k8s-master01 pki]# 
    [root@k8s-master01 pki]# cfssl gencert   -ca=/etc/kubernetes/pki/front-proxy-ca.pem   -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem   -config=ca-config.json   -profile=kubernetes   front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
    2022/06/28 15:44:21 [INFO] generate received request
    2022/06/28 15:44:21 [INFO] received CSR
    2022/06/28 15:44:21 [INFO] generating key: rsa-2048
    2022/06/28 15:44:21 [INFO] encoded CSR
    2022/06/28 15:44:21 [INFO] signed certificate with serial number 465018762783610630993811459514176489950080118210
    2022/06/28 15:44:21 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@k8s-master01 pki]# echo $?    # exit status (the warning can be ignored)
    0
    

    Generate the controller-manager certificate

    [root@k8s-master01 pki]# cfssl gencert \
    >    -ca=/etc/kubernetes/pki/ca.pem \
    >    -ca-key=/etc/kubernetes/pki/ca-key.pem \
    >    -config=ca-config.json \
    >    -profile=kubernetes \
    >    manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
    2022/06/28 15:46:11 [INFO] generate received request
    2022/06/28 15:46:11 [INFO] received CSR
    2022/06/28 15:46:11 [INFO] generating key: rsa-2048
    2022/06/28 15:46:12 [INFO] encoded CSR
    2022/06/28 15:46:12 [INFO] signed certificate with serial number 679063114634385587330345782725839750790858385407
    2022/06/28 15:46:12 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    # Note: if this is not an HA cluster, change 192.168.10.100:8443 to master01's address and 8443 to the apiserver port (default 6443)
    
    # set-cluster: define a cluster entry
    [root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
    >      --certificate-authority=/etc/kubernetes/pki/ca.pem \
    >      --embed-certs=true \
    >      --server=https://192.168.10.100:8443 \
    >      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
    Cluster "kubernetes" set.
    
    # set-context: define a context
    [root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes \
    >     --cluster=kubernetes \
    >     --user=system:kube-controller-manager \
    >     --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
    Context "system:kube-controller-manager@kubernetes" created.
    
    # set-credentials: define a user entry
    [root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager \
    >      --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
    >      --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
    >      --embed-certs=true \
    >      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
    User "system:kube-controller-manager" set.
    
    # use-context: set the default context
    [root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes \
    >      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
    Switched to context "system:kube-controller-manager@kubernetes".
    

    The following can be executed as-is

    cfssl gencert \
       -ca=/etc/kubernetes/pki/ca.pem \
       -ca-key=/etc/kubernetes/pki/ca-key.pem \
       -config=ca-config.json \
       -profile=kubernetes \
       scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
    
    # Note: if this is not an HA cluster, change 192.168.10.100:8443 to master01's address and 8443 to the apiserver port (default 6443)
    
    kubectl config set-cluster kubernetes \
         --certificate-authority=/etc/kubernetes/pki/ca.pem \
         --embed-certs=true \
         --server=https://192.168.10.100:8443 \
         --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
    
    kubectl config set-credentials system:kube-scheduler \
         --client-certificate=/etc/kubernetes/pki/scheduler.pem \
         --client-key=/etc/kubernetes/pki/scheduler-key.pem \
         --embed-certs=true \
         --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
    
    kubectl config set-context system:kube-scheduler@kubernetes \
         --cluster=kubernetes \
         --user=system:kube-scheduler \
         --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
    
    kubectl config use-context system:kube-scheduler@kubernetes \
         --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
    
    
    cfssl gencert \
       -ca=/etc/kubernetes/pki/ca.pem \
       -ca-key=/etc/kubernetes/pki/ca-key.pem \
       -config=ca-config.json \
       -profile=kubernetes \
       admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
    
    # Note: if this is not an HA cluster, change 192.168.10.100:8443 to master01's address and 8443 to the apiserver port (default 6443)
    
    kubectl config set-cluster kubernetes     --certificate-authority=/etc/kubernetes/pki/ca.pem     --embed-certs=true     --server=https://192.168.10.100:8443     --kubeconfig=/etc/kubernetes/admin.kubeconfig
    kubectl config set-credentials kubernetes-admin     --client-certificate=/etc/kubernetes/pki/admin.pem     --client-key=/etc/kubernetes/pki/admin-key.pem     --embed-certs=true     --kubeconfig=/etc/kubernetes/admin.kubeconfig
    
    kubectl config set-context kubernetes-admin@kubernetes     --cluster=kubernetes     --user=kubernetes-admin     --kubeconfig=/etc/kubernetes/admin.kubeconfig
    
    kubectl config use-context kubernetes-admin@kubernetes     --kubeconfig=/etc/kubernetes/admin.kubeconfig
    

    Create the ServiceAccount key pair (sa.key / sa.pub), used for signing ServiceAccount token secrets

    [root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
    Generating RSA private key, 2048 bit long modulus
    ........................+++
    .......................................................................................................................................................+++
    e is 65537 (0x10001)
    [root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
    writing RSA key
    

    Send the certificates to the other master nodes

    for i in {admin.kubeconfig,controller-manager.kubeconfig,scheduler.kubeconfig}; do scp_file.sh master /etc/kubernetes/$i; done
    for i in $(ls /etc/kubernetes/pki | grep -v etcd); do scp_file.sh master /etc/kubernetes/pki/$i; done
    

    Check the certificate files

    [root@k8s-master01 pki]# ls /etc/kubernetes/pki/
    admin.csr          ca.csr                      front-proxy-ca.csr          sa.key
    admin-key.pem      ca-key.pem                  front-proxy-ca-key.pem      sa.pub
    admin.pem          ca.pem                      front-proxy-ca.pem          scheduler.csr
    apiserver.csr      controller-manager.csr      front-proxy-client.csr      scheduler-key.pem
    apiserver-key.pem  controller-manager-key.pem  front-proxy-client-key.pem  scheduler.pem
    apiserver.pem      controller-manager.pem      front-proxy-client.pem
    [root@k8s-master01 pki]# ls /etc/kubernetes/pki/ |wc -l
    23
    
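    You can also spot-check the validity window of the generated certificates with openssl; a small sketch over the certificate files listed above:

    for c in ca apiserver admin controller-manager scheduler front-proxy-ca front-proxy-client; do
      echo -n "$c: "; openssl x509 -noout -enddate -in /etc/kubernetes/pki/$c.pem
    done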

    Kubernetes System Component Configuration

    etcd Configuration

    The etcd configuration is largely the same on every node; be sure to change the hostname and IP addresses in each master node's etcd config.

    Master01

    [root@k8s-master01 ~]# vim /etc/etcd/etcd.config.yml
    name: 'k8s-master01'
    data-dir: /var/lib/etcd
    wal-dir: /var/lib/etcd/wal
    snapshot-count: 5000
    heartbeat-interval: 100
    election-timeout: 1000
    quota-backend-bytes: 0
    listen-peer-urls: 'https://192.168.10.10:2380'
    listen-client-urls: 'https://192.168.10.10:2379,http://127.0.0.1:2379'
    max-snapshots: 3
    max-wals: 5
    cors:
    initial-advertise-peer-urls: 'https://192.168.10.10:2380'
    advertise-client-urls: 'https://192.168.10.10:2379'
    discovery:
    discovery-fallback: 'proxy'
    discovery-proxy:
    discovery-srv:
    initial-cluster: 'k8s-master01=https://192.168.10.10:2380,k8s-master02=https://192.168.10.20:2380,k8s-master03=https://192.168.10.30:2380'
    initial-cluster-token: 'etcd-k8s-cluster'
    initial-cluster-state: 'new'
    strict-reconfig-check: false
    enable-v2: true
    enable-pprof: true
    proxy: 'off'
    proxy-failure-wait: 5000
    proxy-refresh-interval: 30000
    proxy-dial-timeout: 1000
    proxy-write-timeout: 5000
    proxy-read-timeout: 0
    client-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    peer-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      peer-client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    debug: false
    log-package-levels:
    log-outputs: [default]
    force-new-cluster: false
    

    Master02

    name: 'k8s-master02'
    data-dir: /var/lib/etcd
    wal-dir: /var/lib/etcd/wal
    snapshot-count: 5000
    heartbeat-interval: 100
    election-timeout: 1000
    quota-backend-bytes: 0
    listen-peer-urls: 'https://192.168.10.20:2380'
    listen-client-urls: 'https://192.168.10.20:2379,http://127.0.0.1:2379'
    max-snapshots: 3
    max-wals: 5
    cors:
    initial-advertise-peer-urls: 'https://192.168.10.20:2380'
    advertise-client-urls: 'https://192.168.10.20:2379'
    discovery:
    discovery-fallback: 'proxy'
    discovery-proxy:
    discovery-srv:
    initial-cluster: 'k8s-master01=https://192.168.10.10:2380,k8s-master02=https://192.168.10.20:2380,k8s-master03=https://192.168.10.30:2380'
    initial-cluster-token: 'etcd-k8s-cluster'
    initial-cluster-state: 'new'
    strict-reconfig-check: false
    enable-v2: true
    enable-pprof: true
    proxy: 'off'
    proxy-failure-wait: 5000
    proxy-refresh-interval: 30000
    proxy-dial-timeout: 1000
    proxy-write-timeout: 5000
    proxy-read-timeout: 0
    client-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    peer-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      peer-client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    debug: false
    log-package-levels:
    log-outputs: [default]
    force-new-cluster: false
    

    Master03

    name: 'k8s-master03'
    data-dir: /var/lib/etcd
    wal-dir: /var/lib/etcd/wal
    snapshot-count: 5000
    heartbeat-interval: 100
    election-timeout: 1000
    quota-backend-bytes: 0
    listen-peer-urls: 'https://192.168.10.30:2380'
    listen-client-urls: 'https://192.168.10.30:2379,http://127.0.0.1:2379'
    max-snapshots: 3
    max-wals: 5
    cors:
    initial-advertise-peer-urls: 'https://192.168.10.30:2380'
    advertise-client-urls: 'https://192.168.10.30:2379'
    discovery:
    discovery-fallback: 'proxy'
    discovery-proxy:
    discovery-srv:
    initial-cluster: 'k8s-master01=https://192.168.10.10:2380,k8s-master02=https://192.168.10.20:2380,k8s-master03=https://192.168.10.30:2380'
    initial-cluster-token: 'etcd-k8s-cluster'
    initial-cluster-state: 'new'
    strict-reconfig-check: false
    enable-v2: true
    enable-pprof: true
    proxy: 'off'
    proxy-failure-wait: 5000
    proxy-refresh-interval: 30000
    proxy-dial-timeout: 1000
    proxy-write-timeout: 5000
    proxy-read-timeout: 0
    client-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    peer-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      peer-client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    debug: false
    log-package-levels:
    log-outputs: [default]
    force-new-cluster: false
    

    Create the Service Unit

    Create and start the etcd service on all master nodes

    [root@k8s-master01 ~]# vim /usr/lib/systemd/system/etcd.service
    [Unit]
    Description=Etcd Service
    Documentation=https://coreos.com/etcd/docs/latest/
    After=network.target
    
    [Service]
    Type=notify
    ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
    Restart=on-failure
    RestartSec=10
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    Alias=etcd3.service
    [root@k8s-master01 ~]# scp_file.sh master /usr/lib/systemd/system/etcd.service
    

    Create the etcd certificate directory on all master nodes and start etcd

    mkdir /etc/kubernetes/pki/etcd
    ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
    systemctl daemon-reload
    systemctl enable --now etcd
    

    Check the etcd status

    export ETCDCTL_API=3
    etcdctl --endpoints="192.168.10.30:2379,192.168.10.20:2379,192.168.10.10:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table
    
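    An endpoint health check can be run with the same client flags:

    etcdctl --endpoints="192.168.10.30:2379,192.168.10.20:2379,192.168.10.10:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health --write-out=table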

    High-Availability Configuration

    The caveats are not repeated here; they are the same as for the kubeadm HA configuration.

    This guide likewise uses haproxy + keepalived; see the kubeadm HA installation guide.

    haproxy

    Install keepalived and haproxy on all master nodes

    yum -y install keepalived haproxy 
    

    Configure haproxy on all master nodes; the configuration is identical everywhere, but remember to adjust the IPs.

    vim /etc/haproxy/haproxy.cfg
    global
      maxconn  2000
      ulimit-n  16384
      log  127.0.0.1 local0 err
      stats timeout 30s
    
    defaults
      log global
      mode  http
      option  httplog
      timeout connect 5000
      timeout client  50000
      timeout server  50000
      timeout http-request 15s
      timeout http-keep-alive 15s
    
    frontend k8s-master
      bind 0.0.0.0:8443
      bind 127.0.0.1:8443
      mode tcp
      option tcplog
      tcp-request inspect-delay 5s
      default_backend k8s-master
    
    backend k8s-master
      mode tcp
      option tcplog
      option tcp-check
      balance roundrobin
      default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
      server k8s-master01    192.168.10.10:6443  check
      server k8s-master02    192.168.10.20:6443  check
      server k8s-master03    192.168.10.30:6443  check
    

    keepalived

    master01 keepalived
    Configure keepalived on all master nodes; the configuration differs per node, so keep them distinct.

    [root@k8s-master01 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
    }
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5 
        weight -5
        fall 2
        rise 1
    }
    vrrp_instance VI_1 {
        state MASTER
        interface ens33
        mcast_src_ip 192.168.10.10
        virtual_router_id 51
        priority 101
        nopreempt
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.10.100
        }
        track_script {
          chk_apiserver 
        } 
    }
    

    master02 keepalived

    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
    }
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5 
        weight -5
        fall 2
        rise 1
     
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        mcast_src_ip 192.168.10.20
        virtual_router_id 51
        priority 100
        nopreempt
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.10.100
        }
        track_script {
          chk_apiserver 
        } 
    }
    

    master03 keepalived

    ! Configuration File for keepalived
    global_defs {
        router_id LVS_DEVEL
    }
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2  
        rise 1
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface ens33
        mcast_src_ip 192.168.10.30
        virtual_router_id 51
        priority 100
        nopreempt
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.10.100
        }
        track_script {
          chk_apiserver 
        } 
    }
    

    Health Check Configuration

    On all master nodes

    [root@k8s-master01 ~]# vim /etc/keepalived/check_apiserver.sh
    #!/bin/bash
    
    err=0
    for k in $(seq 1 3)
    do
        check_code=$(pgrep haproxy)
        if [[ $check_code == "" ]]; then
            err=$(expr $err + 1)
            sleep 1
            continue
        else
            err=0
            break
        fi
    done
    
    if [[ $err != "0" ]]; then
        echo "systemctl stop keepalived"
        /usr/bin/systemctl stop keepalived
        exit 1
    else
        exit 0
    fi
    
    [root@k8s-master01 ~]# chmod +x /etc/keepalived/check_apiserver.sh
    [root@k8s-master01 ~]# scp_file.sh master /etc/keepalived/check_apiserver.sh    # chmod before copying so the executable bit is propagated
    

    Start haproxy and keepalived on all master nodes

    systemctl daemon-reload
    systemctl enable --now haproxy
    systemctl enable --now keepalived
    

    Test the VIP from the other nodes

    If keepalived and haproxy are installed, verify that keepalived is working properly.

    [root@k8s-master03 ~]# ping 192.168.10.100
    PING 192.168.10.100 (192.168.10.100) 56(84) bytes of data.
    64 bytes from 192.168.10.100: icmp_seq=1 ttl=64 time=0.213 ms
    
    [root@k8s-master03 ~]# telnet 192.168.10.100 8443
    Trying 192.168.10.100...
    Connected to 192.168.10.100.
    Escape character is '^]'.
    Connection closed by foreign host.
    

    Kubernetes Component Configuration

    Create the required directories on all nodes

    mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
    

    Apiserver

    Create the kube-apiserver service on all master nodes

    Note: if this is not an HA cluster, change 192.168.10.100 to master01's address.

    Master01 configuration

    This guide uses 10.96.0.0/16 as the Kubernetes Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.

    [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    ExecStart=/usr/local/bin/kube-apiserver \
          --v=2  \
          --logtostderr=true  \
          --allow-privileged=true  \
          --bind-address=0.0.0.0  \
          --secure-port=6443  \
          --insecure-port=0  \
          --advertise-address=192.168.10.10 \
          --service-cluster-ip-range=10.96.0.0/16  \
          --service-node-port-range=30000-32767  \
          --etcd-servers=https://192.168.10.10:2379,https://192.168.10.20:2379,https://192.168.10.30:2379 \
          --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
          --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
          --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
          --client-ca-file=/etc/kubernetes/pki/ca.pem  \
          --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
          --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
          --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
          --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
          --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
          --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
          --service-account-issuer=https://kubernetes.default.svc.cluster.local \
          --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
          --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
          --authorization-mode=Node,RBAC  \
          --enable-bootstrap-token-auth=true  \
          --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
          --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
          --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
          --requestheader-allowed-names=aggregator  \
          --requestheader-group-headers=X-Remote-Group  \
          --requestheader-extra-headers-prefix=X-Remote-Extra-  \
          --requestheader-username-headers=X-Remote-User
          # --token-auth-file=/etc/kubernetes/token.csv
    
    Restart=on-failure
    RestartSec=10s
    LimitNOFILE=65535
    
    [Install]
    WantedBy=multi-user.target
    

    Master02 configuration

    vim /usr/lib/systemd/system/kube-apiserver.service
    
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    ExecStart=/usr/local/bin/kube-apiserver \
          --v=2  \
          --logtostderr=true  \
          --allow-privileged=true  \
          --bind-address=0.0.0.0  \
          --secure-port=6443  \
          --insecure-port=0  \
          --advertise-address=192.168.10.20 \
          --service-cluster-ip-range=10.96.0.0/16  \
          --service-node-port-range=30000-32767  \
          --etcd-servers=https://192.168.10.10:2379,https://192.168.10.20:2379,https://192.168.10.30:2379 \
          --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
          --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
          --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
          --client-ca-file=/etc/kubernetes/pki/ca.pem  \
          --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
          --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
          --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
          --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
          --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
          --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
          --service-account-issuer=https://kubernetes.default.svc.cluster.local \
          --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
          --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
          --authorization-mode=Node,RBAC  \
          --enable-bootstrap-token-auth=true  \
          --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
          --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
          --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
          --requestheader-allowed-names=aggregator  \
          --requestheader-group-headers=X-Remote-Group  \
          --requestheader-extra-headers-prefix=X-Remote-Extra-  \
          --requestheader-username-headers=X-Remote-User
          # --token-auth-file=/etc/kubernetes/token.csv
    
    Restart=on-failure
    RestartSec=10s
    LimitNOFILE=65535
    
    [Install]
    WantedBy=multi-user.target
    

    Master03 configuration

    vim /usr/lib/systemd/system/kube-apiserver.service
    
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    ExecStart=/usr/local/bin/kube-apiserver \
          --v=2  \
          --logtostderr=true  \
          --allow-privileged=true  \
          --bind-address=0.0.0.0  \
          --secure-port=6443  \
          --insecure-port=0  \
          --advertise-address=192.168.10.30 \
          --service-cluster-ip-range=10.96.0.0/16  \
          --service-node-port-range=30000-32767  \
          --etcd-servers=https://192.168.10.10:2379,https://192.168.10.20:2379,https://192.168.10.30:2379 \
          --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
          --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
          --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
          --client-ca-file=/etc/kubernetes/pki/ca.pem  \
          --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
          --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
          --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
          --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
          --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
          --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
          --service-account-issuer=https://kubernetes.default.svc.cluster.local \
          --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
          --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
          --authorization-mode=Node,RBAC  \
          --enable-bootstrap-token-auth=true  \
          --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
          --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
          --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
          --requestheader-allowed-names=aggregator  \
          --requestheader-group-headers=X-Remote-Group  \
          --requestheader-extra-headers-prefix=X-Remote-Extra-  \
          --requestheader-username-headers=X-Remote-User
          # --token-auth-file=/etc/kubernetes/token.csv
    
    Restart=on-failure
    RestartSec=10s
    LimitNOFILE=65535
    
    [Install]
    WantedBy=multi-user.target
    

    Start the apiserver

    Enable and start kube-apiserver on all master nodes

    systemctl daemon-reload
    systemctl enable --now kube-apiserver
    

    Check kube-apiserver status

    systemctl status kube-apiserver
    


    If the system log shows messages like these, they can be ignored:

    Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.004739    7450 clientconn.go:948] ClientConn switching balancer to "pick_first"
    Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.004843    7450 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc011bd4c80, {CONNECTING <nil>}
    Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.010725    7450 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc011bd4c80, {READY <nil>}
    Dec 11 20:51:15 k8s-master01 kube-apiserver: I1211 20:51:15.011370    7450 controlbuf.go:508] transport: loopyWriter.run returning. connection error: desc = "transport is closing"
    

    ControllerManager

    Configure the kube-controller-manager service on all master nodes (the configuration is identical on every master)

    This guide uses 172.16.0.0/12 as the Pod CIDR; it must not overlap with the host network or the Kubernetes Service CIDR. Adjust as needed.

    [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    ExecStart=/usr/local/bin/kube-controller-manager \
          --v=2 \
          --logtostderr=true \
          --address=127.0.0.1 \
          --root-ca-file=/etc/kubernetes/pki/ca.pem \
          --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
          --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
          --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
          --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
          --leader-elect=true \
          --use-service-account-credentials=true \
          --node-monitor-grace-period=40s \
          --node-monitor-period=5s \
          --pod-eviction-timeout=2m0s \
          --controllers=*,bootstrapsigner,tokencleaner \
          --allocate-node-cidrs=true \
          --cluster-cidr=172.16.0.0/12 \
          --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
          --node-cidr-mask-size=24
          
    Restart=always
    RestartSec=10s
    
    [Install]
    WantedBy=multi-user.target
    
    [root@k8s-master01 ~]# scp_file.sh master /usr/lib/systemd/system/kube-controller-manager.service
    

    Start kube-controller-manager on the master nodes and check its status

    systemctl daemon-reload
    systemctl enable --now kube-controller-manager
    systemctl  status kube-controller-manager
    

    Scheduler

    Configure the kube-scheduler service on all master nodes (the configuration is identical on every master)

    [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    ExecStart=/usr/local/bin/kube-scheduler \
          --v=2 \
          --logtostderr=true \
          --address=127.0.0.1 \
          --leader-elect=true \
          --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
    
    Restart=always
    RestartSec=10s
    
    [Install]
    WantedBy=multi-user.target
    
    [root@k8s-master01 ~]# scp_file.sh master /usr/lib/systemd/system/kube-scheduler.service
    [root@k8s-master01 ~]# systemctl daemon-reload
    [root@k8s-master01 ~]# systemctl enable --now kube-scheduler
    

    TLS Bootstrapping Configuration

    Reference: an in-depth introduction to TLS Bootstrapping

    The bootstrap resources only need to be created on Master01.

    Note: if this is not an HA cluster, change 192.168.10.100:8443 to master01's address and 8443 to the apiserver port (default 6443).

    [root@k8s-master01 ~]# cd ~/kubernetes/manual-installation-v1.23.x/bootstrap/
    [root@k8s-master01 bootstrap]# kubectl config set-cluster kubernetes     --certificate-authority=/etc/kubernetes/pki/ca.pem     --embed-certs=true     --server=https://192.168.10.100:8443     --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
    Cluster "kubernetes" set.
    [root@k8s-master01 bootstrap]# kubectl config set-credentials tls-bootstrap-token-user     --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
    User "tls-bootstrap-token-user" set.
    [root@k8s-master01 bootstrap]# kubectl config set-context tls-bootstrap-token-user@kubernetes     --cluster=kubernetes     --user=tls-bootstrap-token-user     --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
    Context "tls-bootstrap-token-user@kubernetes" created.
    [root@k8s-master01 bootstrap]# kubectl config use-context tls-bootstrap-token-user@kubernetes     --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
    Switched to context "tls-bootstrap-token-user@kubernetes".
    

    Note: if you modify the token-id and token-secret in bootstrap.secret.yaml, the strings must stay consistent everywhere in the file and keep the same number of characters, and the token c8ad9c.2e4d610cf3e7426e used above must match your modified values.

    [root@k8s-master01 ~]# vim /root/kubernetes/manual-installation-v1.23.x/bootstrap/bootstrap.secret.yaml
    

    [root@k8s-master01 ~]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
    

    The cluster status can now be queried normally

    [root@k8s-master01 ~]# kubectl get cs
    Warning: v1 ComponentStatus is deprecated in v1.19+
    NAME                 STATUS    MESSAGE                         ERROR
    controller-manager   Healthy   ok                              
    scheduler            Healthy   ok                              
    etcd-0               Healthy   {"health":"true","reason":""}   
    etcd-1               Healthy   {"health":"true","reason":""}   
    etcd-2               Healthy   {"health":"true","reason":""} 
    

    Create the bootstrap resources

    [root@k8s-master01 bootstrap]# kubectl create -f bootstrap.secret.yaml
    secret/bootstrap-token-c8ad9c created
    clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
    clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-bootstrap created
    clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-certificate-rotation created
    clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
    clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
    

    Worker Node Configuration

    Copy the Certificates

    Create the directory on all nodes

    mkdir -p /etc/kubernetes/pki
    

    Copy the certificates from the Master01 node to the other nodes

    for i in {pki/ca.pem,pki/ca-key.pem,pki/front-proxy-ca.pem,bootstrap-kubelet.kubeconfig}; do scp_file.sh all /etc/kubernetes/$i; done
    for i in {etcd-ca.pem,etcd.pem,etcd-key.pem}; do scp_file.sh all /etc/etcd/ssl/$i; done
    

    Kubelet Configuration

    Create the required directories on all nodes

    mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
    

    Configure the kubelet service on all nodes

    [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    Documentation=https://github.com/kubernetes/kubernetes
    After=docker.service
    Requires=docker.service
    
    [Service]
    ExecStart=/usr/local/bin/kubelet
    
    Restart=always
    StartLimitInterval=0
    RestartSec=10
    
    [Install]
    WantedBy=multi-user.target
    [root@k8s-master01 ~]# scp_file.sh all /usr/lib/systemd/system/kubelet.service
    

    Runtime: Containerd

    Configure the kubelet service drop-in on all nodes (this could also be written directly into kubelet.service)

    [root@k8s-master01 ~]# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
    [Service]
    Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
    Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd"
    Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
    Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
    ExecStart=
    ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
    [root@k8s-master01 ~]# scp_file.sh all /etc/systemd/system/kubelet.service.d/10-kubelet.conf
    

    Runtime: Docker

    # Runtime: Docker
    # vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
    
    [Service]
    Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
    Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
    Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5"
    Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
    ExecStart=
    ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
    

    Create the kubelet configuration file on all nodes

    If you changed the Kubernetes Service CIDR, update the clusterDNS setting in kubelet-conf.yml to the tenth address of the Service CIDR, e.g. 10.96.0.10.

    [root@k8s-master01 ~]#  vim /etc/kubernetes/kubelet-conf.yml
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    address: 0.0.0.0
    port: 10250
    readOnlyPort: 10255
    authentication:
      anonymous:
        enabled: false
      webhook:
        cacheTTL: 2m0s
        enabled: true
      x509:
        clientCAFile: /etc/kubernetes/pki/ca.pem
    authorization:
      mode: Webhook
      webhook:
        cacheAuthorizedTTL: 5m0s
        cacheUnauthorizedTTL: 30s
    cgroupDriver: systemd
    cgroupsPerQOS: true
    clusterDNS:
    - 10.96.0.10
    clusterDomain: cluster.local
    containerLogMaxFiles: 5
    containerLogMaxSize: 10Mi
    contentType: application/vnd.kubernetes.protobuf
    cpuCFSQuota: true
    cpuManagerPolicy: none
    cpuManagerReconcilePeriod: 10s
    enableControllerAttachDetach: true
    enableDebuggingHandlers: true
    enforceNodeAllocatable:
    - pods
    eventBurst: 10
    eventRecordQPS: 5
    evictionHard:
      imagefs.available: 15%
      memory.available: 100Mi
      nodefs.available: 10%
      nodefs.inodesFree: 5%
    evictionPressureTransitionPeriod: 5m0s
    failSwapOn: true
    fileCheckFrequency: 20s
    hairpinMode: promiscuous-bridge
    healthzBindAddress: 127.0.0.1
    healthzPort: 10248
    httpCheckFrequency: 20s
    imageGCHighThresholdPercent: 85
    imageGCLowThresholdPercent: 80
    imageMinimumGCAge: 2m0s
    iptablesDropBit: 15
    iptablesMasqueradeBit: 14
    kubeAPIBurst: 10
    kubeAPIQPS: 5
    makeIPTablesUtilChains: true
    maxOpenFiles: 1000000
    maxPods: 110
    nodeStatusUpdateFrequency: 10s
    oomScoreAdj: -999
    podPidsLimit: -1
    registryBurst: 10
    registryPullQPS: 5
    resolvConf: /etc/resolv.conf
    rotateCertificates: true
    runtimeRequestTimeout: 2m0s
    serializeImagePulls: true
    staticPodPath: /etc/kubernetes/manifests
    streamingConnectionIdleTimeout: 4h0m0s
    syncFrequency: 1m0s
    volumeStatsAggPeriod: 1m0s
    

    Start kubelet on all nodes

    systemctl daemon-reload
    systemctl enable --now kubelet
    systemctl status kubelet
    
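    Because the bootstrap ClusterRoleBindings created earlier auto-approve node CSRs, you can confirm on master01 that each kubelet's certificate request went through:

    kubectl get csr
    # each node's CSR should show CONDITION "Approved,Issued"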

    Since no network plugin has been installed yet, kubelet will report:

    Container runtime network not ready


    Check the cluster status

    [root@k8s-master01 ~]# kubectl get node
    NAME           STATUS     ROLES    AGE    VERSION
    k8s-master01   NotReady   <none>   4m9s   v1.23.8
    k8s-master02   NotReady   <none>   4m8s   v1.23.8
    k8s-master03   NotReady   <none>   4m9s   v1.23.8
    k8s-node01     NotReady   <none>   4m8s   v1.23.8
    k8s-node02     NotReady   <none>   4m9s   v1.23.8
    k8s-node03     NotReady   <none>   4m9s   v1.23.8
    

    kube-proxy Configuration

    If this is not an HA cluster, change 192.168.10.100:8443 to master01's address and 8443 to the apiserver port (default 6443).

    Run on master01

    cd ~/kubernetes/manual-installation-v1.23.x/
    kubectl -n kube-system create serviceaccount kube-proxy
    
    kubectl create clusterrolebinding system:kube-proxy         --clusterrole system:node-proxier         --serviceaccount kube-system:kube-proxy
    
    SECRET=$(kubectl -n kube-system get sa/kube-proxy \
        --output=jsonpath='{.secrets[0].name}')
    
    JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET \
    --output=jsonpath='{.data.token}' | base64 -d)
    
    PKI_DIR=/etc/kubernetes/pki
    K8S_DIR=/etc/kubernetes
    
    kubectl config set-cluster kubernetes     --certificate-authority=/etc/kubernetes/pki/ca.pem     --embed-certs=true     --server=https://192.168.10.100:8443     --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
    
    kubectl config set-credentials kubernetes     --token=${JWT_TOKEN}     --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
    
    kubectl config set-context kubernetes     --cluster=kubernetes     --user=kubernetes     --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
    
    kubectl config use-context kubernetes     --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
    

    Send the kubeconfig to the other nodes

    scp_file.sh master /etc/kubernetes/kube-proxy.kubeconfig
    scp_file.sh node /etc/kubernetes/kube-proxy.kubeconfig
    

    Add the kube-proxy configuration and service file on all nodes

    [root@k8s-master01 ~]# vim /usr/lib/systemd/system/kube-proxy.service
    [Unit]
    Description=Kubernetes Kube Proxy
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    ExecStart=/usr/local/bin/kube-proxy \
      --config=/etc/kubernetes/kube-proxy.yaml \
      --v=2
    
    Restart=always
    RestartSec=10s
    
    [Install]
    WantedBy=multi-user.target
    [root@k8s-master01 ~]# scp_file.sh all /usr/lib/systemd/system/kube-proxy.service
    

    If you changed the cluster's Pod CIDR, update clusterCIDR in kube-proxy.yaml to your own Pod CIDR.

    [root@k8s-master01 ~]# vim /etc/kubernetes/kube-proxy.yaml
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    bindAddress: 0.0.0.0
    clientConnection:
      acceptContentTypes: ""
      burst: 10
      contentType: application/vnd.kubernetes.protobuf
      kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
      qps: 5
    clusterCIDR: 172.16.0.0/12 
    configSyncPeriod: 15m0s
    conntrack:
      max: null
      maxPerCore: 32768
      min: 131072
      tcpCloseWaitTimeout: 1h0m0s
      tcpEstablishedTimeout: 24h0m0s
    enableProfiling: false
    healthzBindAddress: 0.0.0.0:10256
    hostnameOverride: ""
    iptables:
      masqueradeAll: false
      masqueradeBit: 14
      minSyncPeriod: 0s
      syncPeriod: 30s
    ipvs:
      masqueradeAll: true
      minSyncPeriod: 5s
      scheduler: "rr"
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"
    nodePortAddresses: null
    oomScoreAdj: -999
    portRange: ""
    udpIdleTimeout: 250ms
    [root@k8s-master01 ~]# scp_file.sh all /etc/kubernetes/kube-proxy.yaml
    

    Start kube-proxy on all nodes

    systemctl daemon-reload
    systemctl enable --now kube-proxy
    
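    To confirm kube-proxy is actually running in ipvs mode, you can query its proxyMode endpoint on the metrics port (10249, per the configuration above), or list the virtual servers if the ipvsadm package is installed:

    curl -s 127.0.0.1:10249/proxyMode    # should print: ipvs
    ipvsadm -Ln | head                   # requires yum -y install ipvsadm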

    Installing Calico

    Install the officially recommended version.

    Run on master01

    [root@k8s-master01 ~]# cd ~/kubernetes/manual-installation-v1.23.x/calico/
    Change Calico's network range to your own Pod CIDR:
    [root@k8s-master01 calico]# sed -i "s#POD_CIDR#172.16.0.0/12#g" calico.yaml
    [root@k8s-master01 calico]# grep "IPV4POOL_CIDR" calico.yaml -A 1
                - name: CALICO_IPV4POOL_CIDR
                  value: "172.16.0.0/12"
    [root@k8s-master01 calico]# kubectl apply -f calico.yaml
    

    Check the pod status

    [root@k8s-master01 calico]# kubectl get pod -n kube-system
    NAME                                       READY   STATUS    RESTARTS   AGE
    calico-kube-controllers-6f6595874c-c9jbg   1/1     Running   0          2m39s
    calico-node-27t89                          1/1     Running   0          2m39s
    calico-node-7dnlf                          1/1     Running   0          2m39s
    calico-node-dwtl9                          1/1     Running   0          2m39s
    calico-node-hdsdc                          1/1     Running   0          2m39s
    calico-node-hrcvj                          1/1     Running   0          2m39s
    calico-node-lbstq                          1/1     Running   0          2m39s
    calico-typha-6b6cf8cbdf-fbqkt              1/1     Running   0          2m39s
    

    If a pod's status is abnormal, use kubectl describe or kubectl logs to inspect its logs.
    After installing Calico, /var/log/messages no longer keeps printing the earlier error logs.

    Installing CoreDNS

    Install the officially recommended version.

    [root@k8s-master01 calico]# cd ~/kubernetes/manual-installation-v1.23.x/CoreDNS/
    

    If you changed the Kubernetes Service CIDR, set CoreDNS's Service IP to the tenth IP of the Service CIDR (the trailing 0 appended below turns 10.96.0.1 into 10.96.0.10).

    [root@k8s-master01 CoreDNS]# COREDNS_SERVICE_IP=`kubectl get svc | grep kubernetes | awk '{print $3}'`0
    [root@k8s-master01 CoreDNS]# sed -i "s#KUBEDNS_SERVICE_IP#${COREDNS_SERVICE_IP}#g" coredns.yaml
    

    Install CoreDNS

    [root@k8s-master01 CoreDNS]# kubectl create -f coredns.yaml 
    serviceaccount/coredns created
    clusterrole.rbac.authorization.k8s.io/system:coredns created
    clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
    configmap/coredns created
    deployment.apps/coredns created
    service/kube-dns created
    

    Installing the latest CoreDNS

    COREDNS_SERVICE_IP=`kubectl get svc | grep kubernetes | awk '{print $3}'`0
    
    git clone https://github.com/coredns/deployment.git
    cd deployment/kubernetes
    # ./deploy.sh -s -i ${COREDNS_SERVICE_IP} | kubectl apply -f -
    serviceaccount/coredns created
    clusterrole.rbac.authorization.k8s.io/system:coredns created
    clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
    configmap/coredns created
    deployment.apps/coredns created
    service/kube-dns created
    # check status
     # kubectl get po -n kube-system -l k8s-app=kube-dns
    NAME                       READY   STATUS    RESTARTS   AGE
    coredns-85b4878f78-h29kh   1/1     Running   0          8h
    

    Installing Metrics Server

    In newer Kubernetes versions, system resource collection uses metrics-server, which gathers memory, disk, CPU, and network usage for nodes and Pods.

    Install metrics-server

    [root@k8s-master01 ~]# cd ~/kubernetes/manual-installation-v1.23.x/metrics-server/
    [root@k8s-master01 metrics-server]# kubectl create -f .
    

    Wait for metrics-server to start, then check its status

    [root@k8s-master01 ~]# kubectl top node
    NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
    k8s-master01   152m         2%     1977Mi          51%       
    k8s-master02   138m         3%     1399Mi          36%       
    k8s-master03   113m         2%     1279Mi          33%       
    k8s-node01     57m          1%     679Mi           17%       
    k8s-node02     59m          1%     678Mi           17%       
    k8s-node03     72m          1%     740Mi           19% 
    

    Installing the Dashboard

    The Dashboard displays the various resources in the cluster; it can also be used to view Pod logs in real time and run commands inside containers.

    Install the pinned dashboard version

    [root@k8s-master01 ~]# cd ~/kubernetes/manual-installation-v1.23.x/dashboard/
    [root@k8s-master01 dashboard]# kubectl  create -f .
    

    Installing the latest dashboard

    Official GitHub repository
    The latest release can be found on the official dashboard page.

    Logging in to the dashboard

    Add the following launch flags to the Google Chrome shortcut to work around being unable to access the Dashboard:

    --test-type --ignore-certificate-errors
    

    Change the dashboard Service to NodePort

    kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
    

    Change the type from ClusterIP to NodePort (skip this step if it is already NodePort).
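
    Equivalently, the Service type can be patched without opening an editor:

    kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'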

    Check the exposed port

    [root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
    NAME                   TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
    kubernetes-dashboard   NodePort   10.96.93.0   <none>        443:30891/TCP   8m31s
    

    Using your instance's port number, the dashboard can be reached via the IP of any host running kube-proxy plus that port:

    https:// + host IP + exposed port


    If the user has already been created, look up its token

    kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
    
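    If no such user exists yet, a minimal sketch of creating one; the name admin-user is a common convention, not something created earlier in this guide:

    kubectl -n kube-system create serviceaccount admin-user
    kubectl create clusterrolebinding admin-user --clusterrole=cluster-admin --serviceaccount=kube-system:admin-user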

    Key Production Configuration

    Adjust the Docker configuration

    Docker configuration (not needed when containerd is the runtime). JSON does not allow comments, so remove the # annotations below before saving the real file.
    vim /etc/docker/daemon.json
    {  "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
      ],
     "exec-opts": ["native.cgroupdriver=systemd"],
     "max-concurrent-downloads": 10,   # number of concurrent download threads
     "max-concurrent-uploads": 5,      # number of concurrent upload threads
     "log-opts": {
       "max-size": "300m",   # rotate a log file once it reaches this size
       "max-file": "2"       # number of log files to keep; adjust as needed
     },
     "live-restore": true    # keep containers running while the docker daemon restarts
    }
    

    Extend the certificate validity

    Certificates requested through bootstrapping and issued by the controller-manager are valid for one year by default; in internal environments a longer validity can be configured.

     vim /usr/lib/systemd/system/kube-controller-manager.service
    
    # Set the certificate validity. The longest effective validity appears to be five years, so larger values may still be capped at five years; kubelet re-requests a certificate shortly before expiry.
    --cluster-signing-duration=876000h0m0s \ 
    
    # Automatically issue a certificate whenever one is requested; this defaults to true in newer versions, so it does not need to be configured
    # --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
    

    Adjust the kubelet configuration

    vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
    [Service]
    Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
    Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd"
    Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
    Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384    --image-pull-progress-deadline=30m"
    ExecStart=
    ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
    

    If your company's security team runs vulnerability scans, note that the default TLS cipher configuration is relatively weak; change the cipher suites.

    Add: --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384

    Increase the image pull deadline; pulling images over the public internet can be slow, and the default deadline is short.

    Add: --image-pull-progress-deadline=30m

    [root@k8s-master01 ~]# systemctl daemon-reload
    [root@k8s-master01 ~]# systemctl restart kubelet
    

    Newer Kubernetes versions recommend putting kubelet settings in /etc/kubernetes/kubelet-conf.yml; over time more flags, including the ones above, are being migrated into this file.

    [root@k8s-master01 ~]# vim /etc/kubernetes/kubelet-conf.yml
    Append at the end:
    rotateServerCertificates: true
    allowedUnsafeSysctls:     # kernel parameters (connection counts, open-file limits, etc.) may not be modified by default
     - "net.core*"            # allowing kernel tuning can have security implications; configure as needed
     - "net.ipv4.*"
    kubeReserved:             # reserve resources for Kubernetes components
      cpu: "1"
      memory: 1Gi
      ephemeral-storage: 10Gi
    systemReserved:           # reserve resources for system daemons
      cpu: "1"
      memory: 1Gi
      ephemeral-storage: 10Gi
    
    [root@k8s-master01 ~]# systemctl daemon-reload
    [root@k8s-master01 ~]# systemctl restart kubelet
    

    Adjust host ROLES and labels

    The ROLES column currently shows <none>; change k8s-master01's ROLES to master.

    Kubernetes itself has no notion of which role a node plays; a master node simply runs a few more components than a worker node, so the ROLES value must be assigned by hand.

    [root@k8s-master01 ~]# kubectl get node
    NAME           STATUS   ROLES    AGE   VERSION
    k8s-master01   Ready    <none>   19h   v1.23.8
    k8s-master02   Ready    <none>   19h   v1.23.8
    k8s-master03   Ready    <none>   19h   v1.23.8
    k8s-node01     Ready    <none>   19h   v1.23.8
    k8s-node02     Ready    <none>   19h   v1.23.8
    k8s-node03     Ready    <none>   19h   v1.23.8
    [root@k8s-master01 ~]# kubectl get node --show-labels
    NAME           STATUS   ROLES    AGE   VERSION   LABELS
    k8s-master01   Ready    <none>   19h   v1.23.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master01,kubernetes.io/os=linux,node.kubernetes.io/node=
    k8s-master02   Ready    <none>   19h   v1.23.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master02,kubernetes.io/os=linux,node.kubernetes.io/node=
    k8s-master03   Ready    <none>   19h   v1.23.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master03,kubernetes.io/os=linux,node.kubernetes.io/node=
    k8s-node01     Ready    <none>   19h   v1.23.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node01,kubernetes.io/os=linux,node.kubernetes.io/node=
    k8s-node02     Ready    <none>   19h   v1.23.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node02,kubernetes.io/os=linux,node.kubernetes.io/node=
    k8s-node03     Ready    <none>   19h   v1.23.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node03,kubernetes.io/os=linux,node.kubernetes.io/node=
    [root@k8s-master01 ~]# kubectl label node k8s-master01 node-role.kubernetes.io/master=''
    node/k8s-master01 labeled
    [root@k8s-master01 ~]# kubectl get node
    NAME           STATUS   ROLES    AGE   VERSION
    k8s-master01   Ready    master   19h   v1.23.8
    k8s-master02   Ready    <none>   19h   v1.23.8
    k8s-master03   Ready    <none>   19h   v1.23.8
    k8s-node01     Ready    <none>   19h   v1.23.8
    k8s-node02     Ready    <none>   19h   v1.23.8
    k8s-node03     Ready    <none>   19h   v1.23.8
    

    Production Recommendations

    1. Always install production environments from binary components.
    2. Keep etcd on a disk separate from the system disk; SSDs are a must.
    3. Keep the Docker data disk separate from the system disk, preferably also on SSDs.
