• kubeadm部署高可用K8S1.15


    节点信息

    IP 角色  操作系统 备注
    10.0.0.1 master centos7.6 keepalived
    10.0.0.2 master centos7.6 keepalived
    10.0.0.3 master centos7.6 keepalived

    1.环境准备

    应事先完成centos7的基础环境配置,包括关闭selinux、关闭防火墙、配置时间同步、配置节点ssh免密。关闭NetworkManager等。

    1.1 升级系统内核

    [root@k8s-master1 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo \
     && wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
    [root@k8s-master1 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    [root@k8s-master1 ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
    [root@k8s-master1 ~]# yum --disablerepo="*" --enablerepo="elrepo-kernel" list available #lt为长期稳定支持版,ml为最新稳定版
    kernel-lt.x86_64 -.el7.elrepo elrepo-kernel
    ......
    kernel-ml.x86_64 -.el7.elrepo elrepo-kernel
    ......
    #安装内核
    [root@k8s-master1 ~]# yum --enablerepo=elrepo-kernel install kernel-lt-devel kernel-lt -y
    #设置从新安装内核启动
    [root@k8s-master1 ~]# awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
    CentOS Linux (4.4.197-1.el7.elrepo.x86_64) 7 (Core)
    CentOS Linux (3.10.0-957.el7.x86_64) 7 (Core)
    CentOS Linux (0-rescue-b4c601a613824f9f827cb9787b605efb) 7 (Core)
    [root@k8s-master1 ~]# grub2-set-default 0

    1.2 优化内核参数

    [root@k8s-master1 ~]# cat << EOF | tee  /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.ipv4.ip_forward = 1
    # 由于tcp_tw_recycle与kubernetes的NAT冲突,必须关闭!否则会导致服务不通。
    # 注意:sysctl配置文件不支持行尾注释,注释必须单独成行,否则sysctl -p会报错。
    net.ipv4.tcp_tw_recycle = 0
    # 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
    vm.swappiness = 0
    fs.inotify.max_user_instances = 512
    fs.inotify.max_user_watches = 1280000
    fs.file-max = 2000000
    fs.nr_open = 2000000
    # 关闭不使用的ipv6协议栈,防止触发docker BUG.
    net.ipv6.conf.all.disable_ipv6 = 1
    net.netfilter.nf_conntrack_max = 524288
    EOF
    [root@k8s-master1 ~]# cat >>/etc/sysctl.conf <<EOF
    net.ipv4.ip_forward = 1
    vm.swappiness = 0
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.tcp_max_syn_backlog = 65536
    net.core.netdev_max_backlog =  32768
    net.core.somaxconn = 32768
    net.core.wmem_default = 8388608
    net.core.rmem_default = 8388608
    net.core.rmem_max = 16777216
    net.core.wmem_max = 16777216
    net.ipv4.tcp_timestamps = 0
    net.ipv4.tcp_synack_retries = 2
    net.ipv4.tcp_syn_retries = 2
    # tcp_tw_recycle与kubernetes的NAT冲突,必须保持为0(与上面k8s.conf一致)
    net.ipv4.tcp_tw_recycle = 0
    net.ipv4.tcp_tw_reuse = 1
    net.ipv4.tcp_mem = 94500000 915000000 927000000
    net.ipv4.tcp_max_orphans = 3276800
    net.ipv4.ip_local_port_range = 1024  65535
    EOF
    
    sysctl -p

    1.3 加载 内核ipvs相关模块

    [root@k8s-master1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    > #!/bin/bash
    > modprobe -- ip_vs
    > modprobe -- ip_vs_rr
    > modprobe -- ip_vs_wrr
    > modprobe -- ip_vs_sh
    > modprobe -- nf_conntrack_ipv4
    > modprobe -- br_netfilter
    > EOF
    [root@k8s-master1 ~]#
    [root@k8s-master1 ~]# sh  /etc/sysconfig/modules/ipvs.modules 
    [root@k8s-master1 ~]# lsmod  | grep ip_
    ip_vs_sh               16384  0
    ip_vs_wrr              16384  0
    ip_vs_rr               16384  19
    ip_vs                 147456  25 ip_vs_rr,ip_vs_sh,ip_vs_wrr
    nf_conntrack          114688  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
    libcrc32c              16384  2 xfs,ip_vs

    1.4 关闭swap分区

    [root@k8s-master1 ~]# swapoff -a 
    [root@k8s-master1 ~]# sed -i 's/.*swap.*/#&/' /etc/fstab

    2.安装配置keepalived

    2.1 安装keepalived

    #3个节点均安装keepalived
    [root@k8s-master1 ~]# yum install keepalived -y
    [root@k8s-master1 ~]# cp /etc/keepalived/keepalived.conf{,.bak}
    [root@k8s-master1 ~]# cat /etc/keepalived/keepalived.conf
    #注意virtual_router_id需一致
    ! Configuration File for keepalived
    global_defs {
       router_id k8s-1
    }
    vrrp_script CheckK8sMaster {
        script "curl -k https://127.0.0.1:6443/api"
        interval 3
        timeout 9
        fall 2
        rise 2
    }
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        virtual_router_id 51
        priority 200
        advert_int 1
        mcast_src_ip 10.0.0.1
        nopreempt
        authentication {
            auth_type PASS
            auth_pass 378378
        }
        unicast_peer {
            10.0.0.2
            10.0.0.3
        }
        virtual_ipaddress {
            10.0.0.10
        }
        track_script {
            CheckK8sMaster
        }
    }

    m2节点keepalived配置,m3的省略:

    [root@k8s-master2 ~]# cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    
    global_defs {
       router_id k8s-2
    }
    
    vrrp_script CheckK8sMaster {
        script "curl -k https://127.0.0.1:6443/api"
        interval 3
        timeout 9
        fall 2
        rise 2
    }
    
    vrrp_instance VI_1 {
        state BACKUP
        interface  eth0
        virtual_router_id 51
        priority 150
        advert_int 1
        mcast_src_ip 10.0.0.2
        nopreempt
        authentication {
            auth_type PASS
            auth_pass 378378
        }
        unicast_peer {
    
        10.0.0.1
        10.0.0.3
        }
        virtual_ipaddress {
            10.0.0.10
        }
        track_script {
            CheckK8sMaster
        }
    }

    启动服务

    [root@k8s-master1 ~]# systemctl enable keepalived && systemctl start keepalived

    2.2 安装部署K8S

    安装配置docker

    K8S节点均需安装
    [root@k8s-master1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    [root@k8s-master1 ~]# yum list docker-ce --showduplicates | sort -r
    [root@k8s-master1 ~]# yum -y install docker-ce-18.09.7-3

    配置docker

    [root@k8s-master1 ~]# cat << EOF > /etc/docker/daemon.json
    {
         "registry-mirrors":[
             "https://c6ai9izk.mirror.aliyuncs.com"
         ],
         "log-driver":"json-file",
         "log-opts":{
             "max-size":"100m"
         },
         "storage-driver":"overlay2",
         "storage-opts": [
         "overlay2.override_kernel_check=true"
         ],
         "live-restore": true,
         "exec-opts": [
         "native.cgroupdriver=systemd"
         ],
         "insecure-registries": ["reg.myhb.com","10.0.0.4"]
     }
    EOF

    注:daemon.json解析

    {
        "authorization-plugins": [],   //访问授权插件
        "data-root": "",   //docker数据持久化存储的根目录
        "dns": [],   //DNS服务器
        "dns-opts": [],   //DNS配置选项,如端口等
        "dns-search": [],   //DNS搜索域名
        "exec-opts": [],   //执行选项
        "exec-root": "",   //执行状态的文件的根目录
        "experimental": false,   //是否开启试验性特性
        "storage-driver": "",   //存储驱动器
        "storage-opts": [],   //存储选项
        "labels": [],   //键值对式标记docker元数据
        "live-restore": true,   //dockerd挂掉是否保活容器(避免了docker服务异常而造成容器退出)
        "log-driver": "",   //容器日志的驱动器
        "log-opts": {},   //容器日志的选项
        "mtu": 0,   //设置容器网络MTU(最大传输单元)
        "pidfile": "",   //daemon PID文件的位置
        "cluster-store": "",   //集群存储系统的URL
        "cluster-store-opts": {},   //配置集群存储
        "cluster-advertise": "",   //对外的地址名称
        "max-concurrent-downloads": 3,   //设置每个pull进程的最大并发
        "max-concurrent-uploads": 5,   //设置每个push进程的最大并发
        "default-shm-size": "64M",   //设置默认共享内存的大小
        "shutdown-timeout": 15,   //设置daemon关闭时等待容器停止的超时时限(秒)
        "debug": true,   //开启调试模式
        "hosts": [],   //监听地址(?)
        "log-level": "",   //日志级别
        "tls": true,   //开启传输层安全协议TLS
        "tlsverify": true,   //开启输层安全协议并验证远程地址
        "tlscacert": "",   //CA签名文件路径
        "tlscert": "",   //TLS证书文件路径
        "tlskey": "",   //TLS密钥文件路径
        "swarm-default-advertise-addr": "",   //swarm对外地址
        "api-cors-header": "",   //设置CORS(跨域资源共享-Cross-origin resource sharing)头
        "selinux-enabled": false,   //开启selinux(用户、进程、应用、文件的强制访问控制)
        "userns-remap": "",   //给用户命名空间设置 用户/组
        "group": "",   //docker所在组
        "cgroup-parent": "",   //设置所有容器的cgroup的父类(?)
        "default-ulimits": {},   //设置所有容器的ulimit
        "init": false,   //容器执行初始化,来转发信号或控制(reap)进程
        "init-path": "/usr/libexec/docker-init",   //docker-init文件的路径
        "ipv6": false,   //开启IPV6网络
        "iptables": false,   //开启防火墙规则
        "ip-forward": false,   //开启net.ipv4.ip_forward
        "ip-masq": false,   //开启ip掩蔽(IP封包通过路由器或防火墙时重写源IP地址或目的IP地址的技术)
        "userland-proxy": false,   //用户空间代理
        "userland-proxy-path": "/usr/libexec/docker-proxy",   //用户空间代理路径
        "ip": "0.0.0.0",   //默认IP
        "bridge": "",   //将容器依附(attach)到桥接网络上的桥标识
        "bip": "",   //指定桥接ip
        "fixed-cidr": "",   //(ipv4)子网划分,即限制ip地址分配范围,用以控制容器所属网段实现容器间(同一主机或不同主机间)的网络访问
        "fixed-cidr-v6": "",   //(ipv6)子网划分
        "default-gateway": "",   //默认网关
        "default-gateway-v6": "",   //默认ipv6网关
        "icc": false,   //容器间通信
        "raw-logs": false,   //原始日志(无颜色、全时间戳)
        "allow-nondistributable-artifacts": [],   //不对外分发的产品提交的registry仓库
        "registry-mirrors": [],   //registry仓库镜像
        "seccomp-profile": "",   //seccomp配置文件
        "insecure-registries": [],   //非https的registry地址
        "no-new-privileges": false,   //禁止新优先级(??)
        "default-runtime": "runc",   //OCI联盟(The Open Container Initiative)默认运行时环境
        "oom-score-adjust": -500,   //daemon的OOM评分调整,越低越不容易被杀死(-1000~1000)
        "node-generic-resources": ["NVIDIA-GPU=UUID1", "NVIDIA-GPU=UUID2"],   //对外公布的资源节点
        "runtimes": {   //运行时
            "cc-runtime": {
                "path": "/usr/bin/cc-runtime"
            },
            "custom": {
                "path": "/usr/local/bin/my-runc-replacement",
                "runtimeArgs": [
                    "--debug"
                ]
            }
        }
    }

    启动docker

    [root@k8s-master1 ~]# systemctl enable docker && systemctl restart docker && systemctl status docker

    配置K8S源

    [root@k8s-master1 ~]# cat << EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF

    安装K8S

    [root@k8s-master1 ~]# yum list  kubelet kubeadm kubectl --showduplicates | sort -r
    [root@k8s-master1 ~]# yum install -y kubelet-1.15.5-0 kubeadm-1.15.5-0 kubectl-1.15.5-0 ipvsadm ipset
    ##设置kubelet开机自启动,注意:这一步不能直接执行 systemctl start kubelet,会报错,成功初始化完后kubelet会自动起来
    [root@k8s-master1 ~]# systemctl enable kubelet
    #kubectl命令补全
    [root@k8s-master1 ~]# source /usr/share/bash-completion/bash_completion
    [root@k8s-master1 ~]# source <(kubectl completion bash)
    [root@k8s-master1 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
    [root@k8s-master1 tmp]# kubeadm config print init-defaults > kubeadm-init.yaml
    [root@k8s-master1 tmp]# cp kubeadm-init.yaml{,.bak}
    [root@k8s-master1 tmp]# vim kubeadm-init.yaml
    需要修改 advertiseAddress、controlPlaneEndpoint、imageRepository、serviceSubnet、kubernetesVersion 五项:
    1. advertiseAddress 修改为 master1 的IP
    2. controlPlaneEndpoint 修改为 VIP+6443端口
    3. imageRepository 修改为阿里的源
    4. serviceSubnet 找网络组要一段没有使用的IP
    5. kubernetesVersion 和上一步安装的版本一致
    [root@k8s-master1 ~]# cat kubeadm-init.yaml
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.0.0.1
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: k8s-master1
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    controlPlaneEndpoint: "10.0.0.10:6443"
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.15.5
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.96.0.0/12
    scheduler: {}

    部署K8S

    [root@k8s-master1 tmp]# kubeadm config images pull --config kubeadm-init.yaml
    [root@k8s-master1 ~]# kubeadm init --config=kubeadm-init.yaml
    [root@k8s-master1 tmp]# mkdir -p $HOME/.kube
    [root@k8s-master1 tmp]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@k8s-master1 tmp]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

    添加其他的master节点

    在k8s-master1将证书文件拷贝至k8s-master2、k8s-master3节点
    在k8s-master1上部署
    #拷贝证书至k8s-master2节点
    [root@k8s-master1 ~]# vim k8s-master-cert.sh
    #!/bin/bash
    # 将k8s-master1上kubeadm生成的CA证书、sa密钥与admin.conf
    # 分发到其余master节点,供其以control-plane身份加入集群。
    # 前提:已配置到各目标节点的root用户ssh免密。
    set -euo pipefail   # 任一ssh/scp失败立即退出,避免证书分发不完整

    USER=root
    # 空格分隔的目标主机列表;for循环依赖word-splitting,故此处不加引号
    CONTROL_PLANE_IPS="k8s-master2 k8s-master3"

    for host in ${CONTROL_PLANE_IPS}; do
        # 先创建目标目录,否则后续scp到etcd子目录会失败
        ssh "${USER}@${host}" "mkdir -p /etc/kubernetes/pki/etcd"
        scp /etc/kubernetes/pki/ca.* "${USER}@${host}:/etc/kubernetes/pki/"
        scp /etc/kubernetes/pki/sa.* "${USER}@${host}:/etc/kubernetes/pki/"
        scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}@${host}:/etc/kubernetes/pki/"
        scp /etc/kubernetes/pki/etcd/ca.* "${USER}@${host}:/etc/kubernetes/pki/etcd/"
        scp /etc/kubernetes/admin.conf "${USER}@${host}:/etc/kubernetes/"
    done
    [root@k8s-master1 ~]# sh -x k8s-master-cert.sh
     #在k8s-master2上执行。注意--control-plane参数(1.15中原--experimental-control-plane已更名为--control-plane),
     #且join必须指定控制平面端点(此处为VIP:6443)
    [root@k8s-master2 ~]# kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
    > --discovery-token-ca-cert-hash sha256:gdfa5553064e75391e03eef75b8fa16ba121f5aheffe85e8187kk6207b610coo \
    > --control-plane

    部署calico插件

    [root@k8s-master1 tmp]# wget -c https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
     
    #修改calico.yaml中CALICO_IPV4POOL_CIDR下面的value值为Pod网段(即kubeadm-init.yaml中的podSubnet)。
    #注意:该网段是Pod CIDR,不能与前面设置的serviceSubnet重叠。
    [root@k8s-master1 tmp]# cp calico.yaml{,.bak}
    [root@k8s-master1 tmp]# vim calico.yaml
    [root@k8s-master1 tmp]# kubectl apply -f calico.yaml
    configmap/calico-config created
    customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
    clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrole.rbac.authorization.k8s.io/calico-node created
    clusterrolebinding.rbac.authorization.k8s.io/calico-node created
    daemonset.extensions/calico-node created
    serviceaccount/calico-node created
    deployment.extensions/calico-kube-controllers created
    serviceaccount/calico-kube-controllers created
    
    [root@k8s-master1 tmp]# kubectl get nodes
    NAME           STATUS     ROLES    AGE   VERSION
    k8s-master1   Ready      master   59m   v1.15.5
    k8s-master2   Ready      master   25m   v1.15.5
    k8s-master3   Ready      master   22m   v1.15.5

    配置ipvs

    [root@k8s-master1 ~]# kubectl edit cm kube-proxy -n kube-system
     
    #重启kube-proxy pod
    [root@k8s-master1 ~]#  kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
    pod "kube-proxy-5s6t5" deleted
    pod "kube-proxy-6xjl5" deleted
    pod "kube-proxy-h5q6x" deleted
    pod "kube-proxy-44hjk" deleted
    pod "kube-proxy-yc67g" deleted
    pod "kube-proxy-6wmh9" deleted
     
    #查看Kube-proxy pod状态
    [root@k8s-master1 ~]# kubectl get pod -n kube-system | grep kube-proxy
    kube-proxy-5vh6s                           1/1     Running             82s
    kube-proxy-4tp4d                           1/1     Running             2m2s
    kube-proxy-5d8sg                           1/1     Running             114s
    kube-proxy-l5cgw                           1/1     Running             97s
    kube-proxy-s3v9f                           1/1     Running             106s
    kube-proxy-4dfx7                           1/1     Running             79s
     
    #查看是否开启了ipvs
    [root@k8s-master1 ~]# kubectl logs kube-proxy-5vh6s -n kube-system
    I0727 :: server_others.go:] Using ipvs Proxier.
    W0727 :: proxier.go:] clusterCIDR not specified, unable to distinguish between internal and external traffic
    W0727 :: proxier.go:] IPVS scheduler not specified, use rr by default
    I0727 :: server.go:] Version: v1.15.5
    I0727 :: conntrack.go:] Setting nf_conntrack_max to
    I0727 :: config.go:] Starting service config controller
    I0727 :: config.go:] Starting endpoints config controller
    I0727 :: controller_utils.go:] Waiting for caches to sync for endpoints config controller
    I0727 :: controller_utils.go:] Waiting for caches to sync for service config controller
    I0727 :: controller_utils.go:] Caches are synced for service config controller
    I0727 :: controller_utils.go:] Caches are synced for endpoints config controller
    [root@k8s-master1 ~]# kubectl logs kube-proxy-ssv94 -n kube-system  | grep "ipvs"
    I0727 :: server_others.go:] Using ipvs Proxier.

     查看K8S组件状态

    [root@k8s-master1 ~]# kubectl get cs
    NAME                 STATUS    MESSAGE             ERROR
    controller-manager   Healthy   ok
    scheduler            Healthy   ok
    etcd-0               Healthy   {"health":"true"}

     master节点去污

    [root@k8s-master1 ~]# kubectl taint nodes --all node-role.kubernetes.io/master-
  • 相关阅读:
    在线教程
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---46
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---45
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---44
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---42
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---43
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---41
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---40
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---37
    《Linux命令行与shell脚本编程大全 第3版》Shell脚本编程基础---36
  • 原文地址:https://www.cnblogs.com/lingfenglian/p/11753590.html
Copyright © 2020-2023  润新知