• Building a Kubernetes 1.20 cluster with kubeadm


    Preliminary environment notes

    Host system prerequisites

    This test environment consists of four hosts: master01, node01, node02, and node03, each with 2 CPU cores and 4 GB of RAM. Each host's system is prepared as follows:
    1. Set the hostnames and synchronize time
    2. Resolve each node's hostname via DNS; in a test environment with only a few hosts, the hosts file also works
    3. Stop the iptables and firewalld services on every node and make sure they are disabled at boot
    4. Disable all swap devices on every node
    5. To use the ipvs proxy mode, every node must also load the ipvs-related kernel modules
    

    Hostname resolution

    Run on all nodes

    [root@master01 ~]# vim /etc/hosts
    172.16.186.111  master01.ik8s.com master01
    172.16.186.112  node01.ik8s.com node01
    172.16.186.113  node02.ik8s.com node02
    172.16.186.114  node03.ik8s.com node03
    [root@master01 ~]# 
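
    A quick sanity check that every entry resolves on each node; a minimal sketch using the hostnames defined above:
    for h in master01 node01 node02 node03; do
        getent hosts $h || echo "unresolved: $h"
    done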
    
    

    Time synchronization

    Taking two hosts as an example; configure the rest the same way
    hostnamectl set-hostname master01
    hostnamectl set-hostname node01
    
    [root@master01 ~]# vim /etc/chrony.conf
    #server 0.centos.pool.ntp.org iburst
    #server 1.centos.pool.ntp.org iburst
    #server 2.centos.pool.ntp.org iburst
    #server 3.centos.pool.ntp.org iburst
    server ntp.aliyun.com iburst
    
    [root@master01 ~]# systemctl restart chronyd
    
    [root@node01 ~]# vim /etc/chrony.conf
    #server 0.centos.pool.ntp.org iburst
    #server 1.centos.pool.ntp.org iburst
    #server 2.centos.pool.ntp.org iburst
    #server 3.centos.pool.ntp.org iburst
    server master01 iburst
    
    [root@node01 ~]# systemctl restart chronyd
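
    To confirm chrony is actually syncing against the configured server:
    [root@node01 ~]# chronyc sources -v
    [root@node01 ~]# chronyc tracking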
    

    Disable iptables/firewalld and SELinux

    Run on all nodes

    # iptables is not installed by default on this OS
    systemctl stop firewalld && systemctl disable firewalld
    sed -i "s/^(SELINUX=).*/1disabled/" /etc/selinux/config
    setenforce 0
    swapoff -a
    vim /etc/fstab
    #/dev/mapper/centos-swap swap   swap   defaults    0 0       # comment out this line
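
    To verify the changes took effect (swap totals should read 0, and getenforce should report Permissive now, Disabled after a reboot):
    free -m | grep -i swap
    getenforce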
    

    Enable the ipvs kernel modules

    Run on all nodes

    vim /etc/sysconfig/modules/ipvs.modules
    #!/bin/bash
    ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
    for i in $(ls $ipvs_mods_dir | grep -o "^[^.]*");do
        /sbin/modinfo -F filename $i &>/dev/null
        if [ $? -eq 0 ];then
            /sbin/modprobe $i
        fi
    done
    
    chmod +x /etc/sysconfig/modules/ipvs.modules
    bash /etc/sysconfig/modules/ipvs.modules
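
    To confirm the modules actually loaded, list them (the grep pattern assumes the standard ip_vs module family):
    lsmod | grep -E 'ip_vs|nf_conntrack'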
    

    Install the packages

    Install Docker

    Run on all nodes

    Configure the Docker daemon options and the yum repository
    mkdir /etc/docker
    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "registry-mirrors" : [
          "https://registry.docker-cn.com",
          "https://docker.mirrors.ustc.edu.cn",
          "http://hub-mirror.c.163.com",
          "https://cr.console.aliyun.com/",
          "https://0trl8ny5.mirror.aliyuncs.com"
      ]
    }
    EOF
    
    yum -y install yum-utils
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum makecache fast
    # yum list docker-ce --showduplicates | sort -r      # list all available docker versions
    yum -y install docker-ce-19.03.9 docker-ce-cli-19.03.9 containerd.io
    
    # Since version 1.13, docker automatically sets the default policy of the iptables FORWARD chain to DROP, which can break the packet forwarding the k8s cluster depends on. The FORWARD chain therefore needs to be reset to ACCEPT after (or before) the docker service starts; here it is configured in the unit file before starting
    vim /usr/lib/systemd/system/docker.service
    [Service]
    Environment="NO_PROXY=127.0.0.0/8,172.16.0.0/24"      #不代理这2个网络
    ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
    ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
    Note: ExecStartPost runs after docker starts and switches the FORWARD chain's policy to ACCEPT
    
    systemctl daemon-reload
    systemctl start docker && systemctl enable docker
    
    Verify
    docker info | grep "No Proxy"
    iptables -vnL | grep "Chain FORWARD"
    
    
    cat > /etc/sysctl.d/k8s.conf << EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    
    sysctl -p /etc/sysctl.d/k8s.conf
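
    If sysctl complains that these bridge keys do not exist, the br_netfilter module is probably not loaded yet; a common fix is to load it and persist it across reboots, sketched here:
    modprobe br_netfilter
    echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
    sysctl -p /etc/sysctl.d/k8s.conf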
    

    Install Kubernetes

    # configure the k8s repo on all nodes
    vim /etc/yum.repos.d/k8s.repo
    [k8s]
    name=k8s repo
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
            https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
    enabled=1
    
    yum repolist
    yum list all | grep "^kube"
    
    # install on the master
    [root@master01 ~]# yum -y install kubeadm-1.20.8 kubelet-1.20.8 kubectl-1.20.8
    [root@master01 ~]# systemctl start kubelet && systemctl enable kubelet
    [root@master01 ~]# for i in kubelet kubeadm kubectl;do rpm -ql $i;done
    ============= Optional steps ==============
    vim /etc/sysconfig/kubelet
    KUBELET_EXTRA_ARGS="--fail-swap-on=false"
    Note: if swap is still enabled, cluster initialization will fail. Swap has already been disabled in this document, so no change is made here
    
    # Overview of kubeadm subcommands before initializing the cluster
    [root@master01 ~]# kubeadm --help
    Available Commands:
    certs       Commands related to handling kubernetes certificates
    completion  generate shell auto-completion
    config     manage the kubeadm cluster configuration stored in a cluster ConfigMap; explained separately below
    init       used only on the master node, to initialize the cluster
    join       used only on a node that is being added to the cluster
    reset      used on a master or a node to revert/undo everything a previous init or join set up
    token      manage bootstrap tokens
    upgrade    upgrade the cluster smoothly to a newer version
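
    As an example of the token subcommand (useful later, when a join token has expired), the following standard kubeadm commands list existing tokens and print a fresh, complete join command:
    [root@master01 ~]# kubeadm token list
    [root@master01 ~]# kubeadm token create --print-join-command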
    
    
    The config subcommand
    [root@master01 ~]# kubeadm config print -h | egrep '(init|join)-default'
      init-defaults Print default init configuration, that can be used for 'kubeadm init'
      join-defaults Print default join configuration, that can be used for 'kubeadm join'
    
    # The config subcommand prints the configuration kubeadm would load when initializing a cluster, as follows
    [root@master01 ~]# kubeadm config print init-defaults
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 1.2.3.4
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: node
      taints: null
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: k8s.gcr.io
    kind: ClusterConfiguration
    kubernetesVersion: 1.21.0
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    
    
    
    flannel default Pod network: 10.244.0.0/16
    calico default Pod network: 192.168.0.0/16
    ======================================
    

    Initialize the cluster

    Method 1:
    Pass the options as command-line flags; use kubeadm init --help to see the available flags, for example:
    [root@master01 ~]# kubeadm init --kubernetes-version=v1.21.0 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
    
    Method 2:
    Define all the options in a YAML file, then load it with kubeadm init --config
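
    A minimal sketch of method 2, using a hypothetical file name kubeadm-init.yaml: dump the defaults, edit them, then feed the file back to kubeadm:
    kubeadm config print init-defaults > kubeadm-init.yaml
    vim kubeadm-init.yaml        # adjust kubernetesVersion, imageRepository, networking, etc.
    kubeadm init --config kubeadm-init.yaml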
    
    
    Method 1 is used here.
    rpm -q kubeadm
    kubeadm-1.21.2-0.x86_64
    Before formally initializing the cluster, pull the images separately; first list which images are needed
    [root@master01 ~]# kubeadm config images list          # these are for the currently installed version
    k8s.gcr.io/kube-apiserver:v1.21.2
    k8s.gcr.io/kube-controller-manager:v1.21.2
    k8s.gcr.io/kube-scheduler:v1.21.2
    k8s.gcr.io/kube-proxy:v1.21.2
    k8s.gcr.io/pause:3.4.1
    k8s.gcr.io/etcd:3.4.13-0
    k8s.gcr.io/coredns/coredns:v1.8.0
    
    Then write a script to pull the images on all nodes
    vim image_pull.sh
    #!/bin/bash
    # pull the required images from a mirror registry, then retag them
    images=(
    kube-apiserver:v1.20.8
    kube-controller-manager:v1.20.8
    kube-scheduler:v1.20.8 
    kube-proxy:v1.20.8 
    pause:3.2 
    etcd:3.4.3-0 
    coredns:1.7.0)
    
    for i in ${images[@]};do 
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$i
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$i  k8s.gcr.io/$i
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$i
    done
    
    
    # run the script on all nodes
    sh -x image_pull.sh
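
    After the script finishes, all the retagged images should show up under the k8s.gcr.io prefix:
    docker images | grep 'k8s.gcr.io'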
    
    
    # To do a trial run of the initialization, add the --dry-run flag; note that nothing is actually installed
    kubeadm init --dry-run 
    
    # initialize the cluster on the master node only
    [root@master01 ~]# kubeadm init \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version="1.20.8" \
    --pod-network-cidr="10.244.0.0/16" \
    --apiserver-advertise-address 172.16.186.111 \
    --control-plane-endpoint 172.16.186.111 \
    --token-ttl 0
    
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:
    
      kubeadm join 172.16.186.111:6443 --token 27kdvx.icgzqrvfavq2dwf7 \
        --discovery-token-ca-cert-hash sha256:9d7af2bc1136db927590535f3ae4ba2fba5873682c68365e67d4d2ec6e113f9f \
        --control-plane
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 172.16.186.111:6443 --token 27kdvx.icgzqrvfavq2dwf7 \
        --discovery-token-ca-cert-hash sha256:9d7af2bc1136db927590535f3ae4ba2fba5873682c68365e67d4d2ec6e113f9f
        
        
    [root@master01 ~]# mkdir -p $HOME/.kube
    [root@master01 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@master01 ~]# kubectl get nodes
    NAME       STATUS     ROLES                  AGE   VERSION
    master01   NotReady   control-plane,master   14m   v1.20.8
    Note: the status is NotReady because no network add-on has been deployed yet
    

    Deploy the network add-on (flannel)

    Note: raw.githubusercontent.com often fails to resolve, so set this up in advance.
    Open https://www.ipaddress.com, look up raw.githubusercontent.com, and add the resolved IP addresses to this machine's /etc/hosts
    [root@master01 ~]# vim /etc/hosts
    185.199.108.133 raw.githubusercontent.com
    185.199.109.133 raw.githubusercontent.com
    185.199.110.133 raw.githubusercontent.com
    185.199.111.133 raw.githubusercontent.com
    
    
    # https://github.com/flannel-io/flannel
    [root@master01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    podsecuritypolicy.policy/psp.flannel.unprivileged created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.apps/kube-flannel-ds created
    
    [root@master01 ~]# kubectl get pod -n kube-system
    NAME                               READY   STATUS    RESTARTS   AGE
    coredns-7f89b7bc75-rsh6q           1/1     Running   0          20m
    coredns-7f89b7bc75-vmdh7           1/1     Running   0          20m
    etcd-master01                      1/1     Running   0          21m
    kube-apiserver-master01            1/1     Running   0          21m
    kube-controller-manager-master01   1/1     Running   0          21m
    kube-flannel-ds-xknps              1/1     Running   0          116s
    kube-proxy-kckxb                   1/1     Running   0          20m
    kube-scheduler-master01            1/1     Running   0          21m
    
    [root@master01 ~]# kubectl get nodes
    NAME       STATUS   ROLES                  AGE   VERSION
    master01   Ready    control-plane,master   21m   v1.20.8
    Note: the status has changed to Ready
    
    
    Port 8080 was the plaintext HTTP port of early k8s releases; later releases no longer use it.
    Port 6443 is the apiserver's HTTPS port.
    The apiserver authenticates clients mutually, and only certificates issued by a CA the apiserver itself trusts are accepted. That CA is generated automatically when k8s is deployed; /etc/kubernetes/pki/ca.crt is the certificate of this private CA. /etc/kubernetes/admin.conf contains a client certificate issued by that trusted CA, so to run k8s commands from any node in the cluster, this file must be placed in the expected location.
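
    To inspect that CA certificate directly (assuming openssl is installed), print its subject, issuer, and validity period:
    openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -subject -issuer -dates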
    View the kubeconfig and its embedded certificates:
    [root@master01 ~]# kubectl config view
    apiVersion: v1
    clusters:
    - cluster:
        certificate-authority-data: DATA+OMITTED       # CA certificate (redacted)
        server: https://172.16.186.111:6443
      name: kubernetes
    contexts:
    - context:
        cluster: kubernetes
        user: kubernetes-admin
      name: kubernetes-admin@kubernetes
    current-context: kubernetes-admin@kubernetes
    kind: Config
    preferences: {}
    users:
    - name: kubernetes-admin
      user:
        client-certificate-data: REDACTED         # client certificate presented to the apiserver (redacted)
        client-key-data: REDACTED                 # client private key (redacted)
    
    
    Resources in k8s fall into two scopes:
    (1) cluster level; a node is a cluster-level resource
    (2) the cluster is further divided into multiple namespaces
    View the namespaces created by default
    [root@master01 ~]# kubectl get ns
    NAME              STATUS   AGE
    default           Active   83m
    kube-node-lease   Active   83m
    kube-public       Active   83m
    kube-system       Active   83m
    
    System-level pods all run in the kube-system namespace.
    Anything that does not explicitly specify a namespace uses default.
    kube-public is public and accessible to everyone.
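
    kubectl can list which resource types are cluster-scoped (nodes, and namespaces themselves) and which are namespaced:
    kubectl api-resources --namespaced=false
    kubectl api-resources --namespaced=true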
    
    View the pods in the kube-system namespace
    [root@master01 ~]# kubectl get pod -n kube-system
    NAME                               READY   STATUS    RESTARTS   AGE
    coredns-7f89b7bc75-rsh6q           1/1     Running   0          87m
    coredns-7f89b7bc75-vmdh7           1/1     Running   0          87m
    ...
    kube-proxy-kckxb                   1/1     Running   0          87m
    kube-scheduler-master01            1/1     Running   0          87m
    or
    [root@master01 ~]# kubectl get pod -n kube-system -o wide
    NAME                          READY   STATUS    RESTARTS   AGE   IP               NODE       NOMINATED NODE   READINESS GATES
    coredns-7f89b7bc75-rsh6q      1/1     Running   0          88m   10.244.0.2       master01   <none>           <none>
    coredns-7f89b7bc75-vmdh7      1/1     Running   0          88m   10.244.0.3       master01   <none>           <none>
    ...
    kube-proxy-kckxb              1/1     Running   0          88m   172.16.186.111   master01   <none>           <none>
    kube-scheduler-master01       1/1     Running   0          88m   172.16.186.111   master01   <none>           <none>
    Note: some pods have 10.x addresses while others have 172.x addresses; a pod with a 172.x address shares the host's network namespace
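
    To confirm that a pod with a 172.x address really shares the host's network namespace, check its hostNetwork field (pod name taken from the output above):
    [root@master01 ~]# kubectl get pod kube-proxy-kckxb -n kube-system -o jsonpath='{.spec.hostNetwork}'
    true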
    
    
    The master node setup is now complete
    

    Configure all remaining worker nodes

    yum -y install kubeadm-1.20.8 kubelet-1.20.8
    systemctl start kubelet.service && systemctl enable kubelet.service
    
    Run the following command on each worker node to join it to the cluster. The command comes from the output printed on the master when the cluster was initialized. Join one node at a time rather than running it on all nodes simultaneously.
    kubeadm join 172.16.186.111:6443 --token 27kdvx.icgzqrvfavq2dwf7 \
    --discovery-token-ca-cert-hash sha256:9d7af2bc1136db927590535f3ae4ba2fba5873682c68365e67d4d2ec6e113f9f
    
    Back on the master, check the status of each node
    kubectl get nodes
    NAME       STATUS   ROLES                  AGE     VERSION
    master01   Ready    control-plane,master   45m     v1.20.8
    node01     Ready    <none>                 8m46s   v1.20.8
    node02     Ready    <none>                 8m30s   v1.20.8
    node03     Ready    <none>  
    
    At this point kubectl commands still cannot be run on the worker nodes, as shown below
    [root@node01 ~]# kubectl get nodes
    The connection to the server localhost:8080 was refused - did you specify the right host or port?
    
    To fix this
    [root@node01 ~]# mkdir .kube        # create this directory on every worker node
    [root@master01 ~]# scp .kube/config node01:/root/.kube/       # from the master, copy the config file into that directory on each node, then query from the node again
    [root@node01 ~]# kubectl get nodes
    NAME       STATUS   ROLES                  AGE   VERSION
    master01   Ready    control-plane,master   53m   v1.20.8
    node01     Ready    <none>                 16m   v1.20.8
    node02     Ready    <none>                 15m   v1.20.8
    node03     Ready    <none>                 15m   v1.20.8
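
    The copy can also be done for all three worker nodes in one loop from the master; a sketch assuming root SSH access between the nodes:
    for n in node01 node02 node03; do
        ssh $n "mkdir -p /root/.kube"
        scp /root/.kube/config $n:/root/.kube/config
    done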
    