• k8s: Building a Highly Available Cluster with Three Masters and Three Workers (kubeadm)


    Prepare the environment

    This article uses kubeadm to build a highly available Kubernetes cluster with three masters and three workers; the Kubernetes version is 1.16.

    Role              IP             Machine spec
    master1           192.168.1.77   4 CPUs, 12 GB RAM, 60 GB disk
    master2           192.168.1.78   4 CPUs, 12 GB RAM, 60 GB disk
    master3           192.168.1.79   4 CPUs, 12 GB RAM, 60 GB disk
    node1             192.168.1.80   4 CPUs, 12 GB RAM, 60 GB disk
    node2             192.168.1.81   4 CPUs, 12 GB RAM, 50 GB disk
    node3             192.168.1.82   4 CPUs, 12 GB RAM, 50 GB disk
    vip (virtual IP)  192.168.1.83
    # Run on all machines: disable the firewall
    systemctl stop firewalld
    systemctl disable firewalld
    
    # Run on all machines: disable SELinux
    sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent
    setenforce 0  # temporary
    
    # Run on all machines: disable swap
    swapoff -a  # temporary
    sed -ri 's/.*swap.*/#&/' /etc/fstab    # permanent
    
    # Run on all machines: set the hostname according to the plan
    hostnamectl set-hostname <hostname>
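    # e.g. on the first master (repeat on each machine with its own name from the table above)
    hostnamectl set-hostname master1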
    
    # Run on all machines: add host entries to /etc/hosts
    cat >> /etc/hosts << EOF
    192.168.1.83    master.k8s.io   k8s-vip
    192.168.1.77    master01.k8s.io master1
    192.168.1.78    master02.k8s.io master2
    192.168.1.79    master03.k8s.io master3
    192.168.1.80    node01.k8s.io   node1
    192.168.1.81    node02.k8s.io   node2
    192.168.1.82    node03.k8s.io   node3
    EOF
    
    # Run on all machines: pass bridged IPv4 traffic to iptables chains
    cat > /etc/sysctl.d/k8s.conf << EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    
    # Run on all machines: apply the sysctl settings
    sysctl --system  
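    # If the two bridge keys above are missing from the output, the br_netfilter kernel
    # module is probably not loaded yet (assumes a stock CentOS 7 kernel); load it,
    # make it persistent, then re-check the values:
    modprobe br_netfilter
    echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
    sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables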
    
    # Run on all machines: synchronize the time
    yum install ntpdate -y
    ntpdate time.windows.com
    


    Deploy keepalived on all master nodes

    Install the required packages and keepalived

    yum install -y conntrack-tools libseccomp libtool-ltdl
    yum install -y keepalived
    

    Configure the master nodes

    Master node configuration (run the following on every master node):

    cat > /etc/keepalived/keepalived.conf <<EOF 
    ! Configuration File for keepalived
    
    global_defs {
       router_id k8s
    }
    
    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }
    
    vrrp_instance VI_1 {
        state MASTER 
        interface ens192 
        virtual_router_id 51
        priority 250
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass ceb1b3ec013d66163d6ab
        }
        virtual_ipaddress {
            192.168.1.83
        }
        track_script {
            check_haproxy
        }
    
    }
    EOF
    

    Note: virtual_ipaddress is the virtual IP (192.168.1.83) declared earlier. This example uses the same state MASTER and priority 250 on every master; VRRP still elects a single holder for the VIP, but many deployments instead set state BACKUP and a lower priority on the second and third masters so the failover order is explicit. The check_haproxy script relies on killall, which comes from the psmisc package; install it with yum if it is missing.

    Set interface to the actual NIC name of the host; it must match what ifconfig (or ip addr) shows:
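    A quick way to confirm the NIC name before editing the file (ens192 is simply the name on my hosts; yours may differ):

    # list interfaces in brief form and pick the one carrying the 192.168.1.x address
    ip -brief addr show
    # or print the interface used by the default route
    ip route show default | awk '{print $5}'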


    Start and verify

    Run on all master nodes:

    # start keepalived
    systemctl start keepalived.service
    # enable start on boot
    systemctl enable keepalived.service
    # check the service status
    systemctl status keepalived.service
    


    After starting, check the NIC information on each master:

    ip a s ens192
    

    Exactly one of the three masters will hold the virtual IP.
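    To see at a glance which master currently holds it, a small loop like the following can help (a sketch; it assumes root SSH access to the masters and the ens192 interface name used above):

    for h in 192.168.1.77 192.168.1.78 192.168.1.79; do
      echo "== $h =="
      ssh root@$h "ip -4 addr show ens192 | grep 192.168.1.83"
    done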


    Deploy haproxy

    Install

    Install haproxy on all master nodes:

    yum install -y haproxy
    

    Configure

    The configuration is identical on all three masters. It declares the three master API servers as proxied backends and binds haproxy to port 16443, so port 16443 becomes the entry point of the cluster.

    cat > /etc/haproxy/haproxy.cfg << EOF
    #---------------------------------------------------------------------
    # Global settings
    #---------------------------------------------------------------------
    global
        # to have these messages end up in /var/log/haproxy.log you will
        # need to:
        # 1) configure syslog to accept network log events.  This is done
        #    by adding the '-r' option to the SYSLOGD_OPTIONS in
        #    /etc/sysconfig/syslog
        # 2) configure local2 events to go to the /var/log/haproxy.log
        #   file. A line like the following can be added to
        #   /etc/sysconfig/syslog
        #
        #    local2.*                       /var/log/haproxy.log
        #
        log         127.0.0.1 local2
        
        chroot      /var/lib/haproxy
        pidfile     /var/run/haproxy.pid
        maxconn     4000
        user        haproxy
        group       haproxy
        daemon 
           
        # turn on stats unix socket
        stats socket /var/lib/haproxy/stats
    #---------------------------------------------------------------------
    # common defaults that all the 'listen' and 'backend' sections will
    # use if not designated in their block
    #---------------------------------------------------------------------  
    defaults
        mode                    http
        log                     global
        option                  httplog
        option                  dontlognull
        option http-server-close
        option forwardfor       except 127.0.0.0/8
        option                  redispatch
        retries                 3
        timeout http-request    10s
        timeout queue           1m
        timeout connect         10s
        timeout client          1m
        timeout server          1m
        timeout http-keep-alive 10s
        timeout check           10s
        maxconn                 3000
    #---------------------------------------------------------------------
    # kubernetes apiserver frontend which proxys to the backends
    #--------------------------------------------------------------------- 
    frontend kubernetes-apiserver
        mode                 tcp
        bind                 *:16443
        option               tcplog
        default_backend      kubernetes-apiserver    
    #---------------------------------------------------------------------
    # round robin balancing between the various backends
    #---------------------------------------------------------------------
    backend kubernetes-apiserver
        mode        tcp
        balance     roundrobin
        server      master01.k8s.io   192.168.1.77:6443 check
        server      master02.k8s.io   192.168.1.78:6443 check
        server      master03.k8s.io   192.168.1.79:6443 check
    #---------------------------------------------------------------------
    # collection haproxy statistics message
    #---------------------------------------------------------------------
    listen stats
        bind                 *:1080
        stats auth           admin:awesomePassword
        stats refresh        5s
        stats realm          HAProxy Statistics
        stats uri            /admin?stats
    EOF
    

    Start and verify

    Run on all masters:

    systemctl enable haproxy
    systemctl start haproxy
    systemctl status haproxy
    


    Check the port

    netstat -lntup|grep haproxy
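    The statistics page declared in the listen stats section is another quick health check; the port, URI, and credentials all come from haproxy.cfg above:

    curl -u admin:awesomePassword "http://127.0.0.1:1080/admin?stats"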
    


    Install Docker/kubeadm/kubelet/kubectl on all nodes

    At this Kubernetes version the default container runtime is Docker, so install Docker first.

    Install Docker

    yum install -y wget
    wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
    yum -y install docker-ce-18.06.1.ce-3.el7
    systemctl enable docker && systemctl start docker
    docker --version
    
    cat > /etc/docker/daemon.json << EOF
    {
      "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
    }
    EOF
    
    systemctl restart docker
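    After the restart you can confirm the mirror was picked up (Registry Mirrors is a standard field of the docker info output):

    docker info | grep -A 1 "Registry Mirrors"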
    

    Add the Alibaba Cloud YUM repository

    cat > /etc/yum.repos.d/kubernetes.repo << EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    

    Install kubeadm, kubelet, and kubectl

    yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3
    systemctl enable kubelet
    

    Deploy the Kubernetes masters

    Create the kubeadm configuration file

    Perform the following on the master that currently holds the VIP (the virtual IP); in my case that is master3.

    mkdir /usr/local/kubernetes/manifests -p
    cd /usr/local/kubernetes/manifests/
    vi kubeadm-config.yaml
    
    apiServer:
      certSANs:
        - master1
        - master2
        - master3
        - master.k8s.io
        - 192.168.1.83
        - 192.168.1.77
        - 192.168.1.78
        - 192.168.1.79
        - 127.0.0.1
      extraArgs:
        authorization-mode: Node,RBAC
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta1
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: "master.k8s.io:16443"
    controllerManager: {}
    dns: 
      type: CoreDNS
    etcd:
      local:    
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.16.3
    networking: 
      dnsDomain: cluster.local  
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.1.0.0/16
    scheduler: {}
    

    Run kubeadm init on the VIP node

    Run this step on the VIP node (master3 in my case).
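    Optionally, the control-plane images can be pre-pulled first using the same configuration file; this surfaces image registry problems before the init itself (a standard kubeadm subcommand):

    kubeadm config images pull --config kubeadm-config.yaml

    Then run the init: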

    kubeadm init --config kubeadm-config.yaml
    


    As prompted, save the following output; it will be needed shortly:

    kubeadm join master.k8s.io:16443 --token sjvdo1.fer288zrvufxzkha \
        --discovery-token-ca-cert-hash sha256:e56060a20f0e8eb7db07f17b23e88c0e5094ec9365a9f15fd2f126a6a783be67 \
        --control-plane
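    If this output is lost or the token expires (kubeadm tokens have a 24-hour TTL by default), a fresh join command can be printed at any time on an existing control-plane node:

    # prints a new worker join command; append --control-plane when joining masters
    kubeadm token create --print-join-command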
    

    As prompted, set up the kubeconfig environment so that kubectl can be used:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    

    Check the cluster status:

    kubectl get nodes
    kubectl get cs
    kubectl get pods -n kube-system
    

    Install the cluster network

    Run on master3:

    mkdir flannel
    cd flannel
    vi kube-flannel.yml
    
    ---
    apiVersion: policy/v1beta1
    kind: PodSecurityPolicy
    metadata:
      name: psp.flannel.unprivileged
      annotations:
        seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
        seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
        apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
        apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    spec:
      privileged: false
      volumes:
      - configMap
      - secret
      - emptyDir
      - hostPath
      allowedHostPaths:
      - pathPrefix: "/etc/cni/net.d"
      - pathPrefix: "/etc/kube-flannel"
      - pathPrefix: "/run/flannel"
      readOnlyRootFilesystem: false
      # Users and groups
      runAsUser:
        rule: RunAsAny
      supplementalGroups:
        rule: RunAsAny
      fsGroup:
        rule: RunAsAny
      # Privilege Escalation
      allowPrivilegeEscalation: false
      defaultAllowPrivilegeEscalation: false
      # Capabilities
      allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
      defaultAddCapabilities: []
      requiredDropCapabilities: []
      # Host namespaces
      hostPID: false
      hostIPC: false
      hostNetwork: true
      hostPorts:
      - min: 0
        max: 65535
      # SELinux
      seLinux:
        # SELinux is unused in CaaSP
        rule: 'RunAsAny'
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: flannel
    rules:
    - apiGroups: ['extensions']
      resources: ['podsecuritypolicies']
      verbs: ['use']
      resourceNames: ['psp.flannel.unprivileged']
    - apiGroups:
      - ""
      resources:
      - pods
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - nodes/status
      verbs:
      - patch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: flannel
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: flannel
    subjects:
    - kind: ServiceAccount
      name: flannel
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: flannel
      namespace: kube-system
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: kube-flannel-cfg
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    data:
      cni-conf.json: |
        {
          "name": "cbr0",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "flannel",
              "delegate": {
                "hairpinMode": true,
                "isDefaultGateway": true
              }
            },
            {
              "type": "portmap",
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      net-conf.json: |
        {
          "Network": "10.244.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                    - linux
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            image: quay.io/coreos/flannel:v0.13.0-rc2
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.13.0-rc2
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
          - name: run
            hostPath:
              path: /run/flannel
          - name: cni
            hostPath:
              path: /etc/cni/net.d
          - name: flannel-cfg
            configMap:
              name: kube-flannel-cfg
    

    Apply the flannel manifest:

    kubectl apply -f kube-flannel.yml
    

    Verify

    kubectl get pods -n kube-system
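    The flannel and CoreDNS pods can take a minute to reach Running; one way to watch just the flannel DaemonSet pods (the app=flannel label comes from the manifest above):

    kubectl get pods -n kube-system -l app=flannel -o wide -w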
    


    Join the remaining masters to the cluster

    First make sure the existing master node is in the Ready state.


    Since master3 is already in the cluster, master1 and master2 now need to be joined.

    Copy the certificates and related files

    Run on master3:

    ssh root@192.168.1.77 mkdir -p /etc/kubernetes/pki/etcd
    ssh root@192.168.1.78 mkdir -p /etc/kubernetes/pki/etcd
    scp /etc/kubernetes/admin.conf root@192.168.1.77:/etc/kubernetes
    scp /etc/kubernetes/admin.conf root@192.168.1.78:/etc/kubernetes
    scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.1.77:/etc/kubernetes/pki
    scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.1.78:/etc/kubernetes/pki
    scp /etc/kubernetes/pki/etcd/ca.* root@192.168.1.77:/etc/kubernetes/pki/etcd
    scp /etc/kubernetes/pki/etcd/ca.* root@192.168.1.78:/etc/kubernetes/pki/etcd
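    The same distribution can also be written as a small loop so no file is missed (a sketch; same hosts and files as above, assuming root SSH access):

    for host in 192.168.1.77 192.168.1.78; do
      ssh root@$host "mkdir -p /etc/kubernetes/pki/etcd"
      scp /etc/kubernetes/admin.conf root@$host:/etc/kubernetes/
      scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@$host:/etc/kubernetes/pki/
      scp /etc/kubernetes/pki/etcd/ca.* root@$host:/etc/kubernetes/pki/etcd/
    done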
    

    Join the cluster

    On master1 and master2, run the join command saved earlier; the --control-plane flag marks them as control-plane (master) nodes.

    kubeadm join master.k8s.io:16443 --token sjvdo1.fer288zrvufxzkha \
        --discovery-token-ca-cert-hash sha256:e56060a20f0e8eb7db07f17b23e88c0e5094ec9365a9f15fd2f126a6a783be67 \
        --control-plane
    

    Check the cluster status (run on master3):

    kubectl get nodes
    kubectl get pods -n kube-system
    


    Run on master1 and master2:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    

    At this point, the three-master Kubernetes control plane is up.

    Add the worker nodes

    Run on the three worker nodes; note that the --control-plane flag is dropped here.

    kubeadm join master.k8s.io:16443 --token sjvdo1.fer288zrvufxzkha \
        --discovery-token-ca-cert-hash sha256:e56060a20f0e8eb7db07f17b23e88c0e5094ec9365a9f15fd2f126a6a783be67
    

    Reinstall the cluster network (run on master3):

    kubectl delete -f kube-flannel.yml
    kubectl apply -f kube-flannel.yml
    

    View all the pods:

    kubectl get pods --all-namespaces
    


    Check the cluster node status:

    kubectl get nodes
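    Freshly joined workers normally show ROLES as <none>; if you want a worker role displayed, the conventional node-role label can be added (optional and purely cosmetic):

    kubectl label node node1 node-role.kubernetes.io/worker=worker
    kubectl label node node2 node-role.kubernetes.io/worker=worker
    kubectl label node node3 node-role.kubernetes.io/worker=worker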
    


    Test the cluster

    kubectl create deployment nginx --image=nginx
    kubectl expose deployment nginx --port=80 --type=NodePort
    kubectl get pod,svc
    

    The NodePort exposed here is 30683.

    The nginx page can be accessed from any master node, from any worker node, and via the virtual IP.
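    For example, checked with curl from any of the hosts (30683 is the NodePort from this particular run; substitute whatever kubectl get svc reports):

    curl -I http://192.168.1.83:30683   # via the virtual IP
    curl -I http://192.168.1.80:30683   # via a worker node IP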

    That completes the build and testing of the highly available three-master, three-worker Kubernetes cluster!
