• Building a highly available Kubernetes (1.18.6) cluster on virtual machines


    Background: we plan to deploy KubeSphere 3.0 later on, which is why we install Kubernetes 1.18.x.

    We build the HA cluster with the stacked etcd topology. Because the cluster's etcd uses the Raft algorithm to guarantee consistency, high availability requires at least 3 master nodes, plus 2 worker nodes in this setup.

    Prepare 5 CentOS 7 virtual machines (http://mirrors.aliyun.com/centos/7.8.2003/isos/x86_64/) with 2 CPUs, 8 GB RAM and 40 GB disk each; an optional hostname/hosts sketch follows the node list below.

    172.16.5.224 k8smaster1 
    172.16.5.225 k8smaster2 
    172.16.5.226 k8smaster3 
    172.16.5.227 k8sworker1 
    172.16.5.228 k8sworker2 
    172.16.5.230 vip
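
    Optionally, set each machine's hostname and make every node resolvable from every other one. This is a small convenience sketch, not part of the original steps; adjust names and IPs to your own environment:

    # run the matching line on each machine (example for master1)
    hostnamectl set-hostname k8smaster1

    # on every node, make all cluster hosts resolvable
    cat >> /etc/hosts <<EOF
    172.16.5.224 k8smaster1
    172.16.5.225 k8smaster2
    172.16.5.226 k8smaster3
    172.16.5.227 k8sworker1
    172.16.5.228 k8sworker2
    EOF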

    First, run the following script on every machine; it installs docker plus the Kubernetes trio (kubelet, kubeadm, kubectl) and prepares the OS:

    #!/bin/bash
    
    # Run on every master node and every worker node
    
    # Install docker
    # Reference documentation:
    # https://docs.docker.com/install/linux/docker-ce/centos/ 
    # https://docs.docker.com/install/linux/linux-postinstall/
    
    # Remove old versions
    yum remove -y docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-selinux \
    docker-engine-selinux \
    docker-engine
    
    # Configure the yum repository
    yum install -y yum-utils \
    device-mapper-persistent-data \
    lvm2
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
    # Install and start docker
    yum install -y docker-ce docker-ce-cli containerd.io
    systemctl enable docker
    systemctl start docker
    
    # Install nfs-utils
    # nfs-utils must be installed before NFS network storage can be mounted
    yum install -y nfs-utils
    
    yum -y install wget
    
    # Disable the firewall
    systemctl stop firewalld
    systemctl disable firewalld
    
    # Disable SELinux
    setenforce 0
    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    
    # Disable swap
    swapoff -a
    yes | cp /etc/fstab /etc/fstab_bak
    cat /etc/fstab_bak |grep -v swap > /etc/fstab
    
    # Update /etc/sysctl.conf
    # Modify the settings if they already exist
    sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g"  /etc/sysctl.conf
    sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g"  /etc/sysctl.conf
    sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g"  /etc/sysctl.conf
    # Append the settings in case they are missing
    echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
    echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
    echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
    # Apply the settings
    sysctl -p
    
    # Configure the Kubernetes yum repository
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    # Remove old versions
    yum remove -y kubelet kubeadm kubectl
    
    # Install kubelet, kubeadm and kubectl
    yum install -y kubelet-1.18.6 kubeadm-1.18.6 kubectl-1.18.6
    
    # Change the docker cgroup driver to systemd
    # # In /usr/lib/systemd/system/docker.service, change the line ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
    # # to ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
    # # (the sed below also adds --data-root=/data/docker, moving docker's data directory to /data/docker)
    # Without this change, adding worker nodes may fail with the following warning:
    # [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
    # Please follow the guide at https://kubernetes.io/docs/setup/cri/
    sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd --data-root=/data/docker#g" /usr/lib/systemd/system/docker.service
    
    # Configure a docker registry mirror to speed up and stabilize image downloads
    # If you can already reach https://hub.docker.com quickly and reliably, this step can be skipped
    curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
    
    # Restart docker and start kubelet
    systemctl daemon-reload
    systemctl restart docker
    systemctl enable kubelet && systemctl start kubelet
    
    docker version
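
    Before moving on, a quick sanity check on every node is worthwhile. This is only an optional verification sketch; kubelet will keep restarting until kubeadm init/join has run, which is expected at this point:

    docker info 2>/dev/null | grep -i 'cgroup driver'   # should report: systemd
    kubelet --version                                   # should report: v1.18.6
    systemctl is-enabled docker kubelet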

    Next, log in to master1, add the k8sapi entry to /etc/hosts (this hostname is the key to the HA setup), and initialize the cluster:
    cat>>/etc/hosts<<EOF
    172.16.5.224 k8sapi
    EOF
    kubeadm init --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=cri --control-plane-endpoint "k8sapi:6443" --kubernetes-version=1.18.6
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config
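
    The kubeadm init output prints the join commands (token and CA cert hash) used in the following steps. If that output is lost, it can be regenerated on master1; the commands below are standard kubeadm/openssl invocations, shown here for convenience:

    # print a fresh worker join command (token + discovery hash)
    kubeadm token create --print-join-command

    # recompute the CA certificate hash manually if needed
    openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
      openssl rsa -pubin -outform der 2>/dev/null | \
      openssl dgst -sha256 -hex | sed 's/^.* //'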

    Log in to master2 and master3, point the k8sapi hostname at master1's IP in /etc/hosts, and create the certificate directories:
    cat>>/etc/hosts<<EOF
    172.16.5.224 k8sapi
    EOF
    mkdir /etc/kubernetes/pki/
    mkdir /etc/kubernetes/pki/etcd

    Back on master1, copy the certificates to master2 and master3:
    cd /etc/kubernetes/pki/
    scp ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key root@172.16.5.225:/etc/kubernetes/pki/
    scp ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key root@172.16.5.226:/etc/kubernetes/pki/
    cd etcd
    scp ca.crt ca.key root@172.16.5.225:/etc/kubernetes/pki/etcd/
    scp ca.crt ca.key root@172.16.5.226:/etc/kubernetes/pki/etcd/
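
    As an alternative to copying the certificates by hand with scp, kubeadm can distribute them through the cluster itself. This is only a hedged alternative, not the method used in this walkthrough:

    # on master1: upload the control-plane certs as a secret and print a certificate key
    kubeadm init phase upload-certs --upload-certs

    # on master2/master3: join with --control-plane plus the printed key,
    # no manual copy of /etc/kubernetes/pki required
    # kubeadm join k8sapi:6443 --token <token> \
    #   --discovery-token-ca-cert-hash sha256:<hash> \
    #   --control-plane --certificate-key <key>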

    Log in to master2 and master3 again and run on each:
    kubeadm join k8sapi:6443 --token u6vw2b.b2avyx7blrzrwths \
        --discovery-token-ca-cert-hash sha256:0c25b70c44b71c5f2f359155dc3f48f323fbe59a79db3abaf34bf947a4bf7989 \
        --control-plane
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config
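
    At this point the control plane should have three members. An optional check from any master (the nodes will still show NotReady until the network plugin is installed later):

    kubectl get nodes
    kubectl get pods -n kube-system -o wide | grep -E 'etcd|kube-apiserver'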

    Log in to worker1 and worker2 (and worker3, if present) and run:
    cat>>/etc/hosts<<EOF
    172.16.5.224 k8sapi
    EOF
    kubeadm join k8sapi:6443 --token u6vw2b.b2avyx7blrzrwths \
        --discovery-token-ca-cert-hash sha256:0c25b70c44b71c5f2f359155dc3f48f323fbe59a79db3abaf34bf947a4bf7989
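
    Back on any master, confirm that the workers registered; all five nodes should be listed, still NotReady until the flannel manifest below is applied:

    kubectl get nodes -o wide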

    On master1, master2 and master3, change the k8sapi entry in /etc/hosts to point to 127.0.0.1, so each master talks to its own local kube-apiserver:
    vi /etc/hosts
    127.0.0.1 k8sapi
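
    The same change can be made without an editor; this one-liner assumes the k8sapi entry was added exactly as in the earlier step:

    sed -i 's/^172\.16\.5\.224 k8sapi$/127.0.0.1 k8sapi/' /etc/hosts
    grep k8sapi /etc/hosts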

    Install Keepalived natively on master1, master2 and master3 (one MASTER, two BACKUPs) and register it as a systemd service that starts at boot:
    yum install -y keepalived
    MASTER node configuration:
    vi /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
        notification_email {
            root@localhost
        }
        notification_email_from root@localhost
        smtp_server 127.0.0.1
        smtp_connect_timeout 30
        router_id LVS_DEVEL
        vrrp_skip_check_adv_addr
        #vrrp_strict    # keep this commented out; strict VRRP compliance makes the VIP unreachable
        vrrp_garp_interval 0
        vrrp_gna_interval 0
    }
    vrrp_instance VI_1 {
        state MASTER
        interface ens33            # your network interface name; check with ip a / ip addr / ifconfig
        virtual_router_id 51       # default 51; pick another value to avoid conflicts, MASTER and BACKUPs must match
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.16.5.230           # the VIP (floating/virtual IP)
        }
    }
    BACKUP node configuration:
    ! Configuration File for keepalived
    global_defs {
        notification_email {
            root@localhost
        }
        notification_email_from root@localhost
        smtp_server localhost
        smtp_connect_timeout 30
        router_id LVS_DEVEL
        vrrp_skip_check_adv_addr
        #vrrp_strict
        vrrp_garp_interval 0
        vrrp_gna_interval 0
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface ens33            # your network interface name; check with ip a / ip addr / ifconfig
        virtual_router_id 51
        priority 90                # priority; give the two BACKUP nodes different values
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.16.5.230           # the VIP (floating/virtual IP)
        }
    }

    systemctl start keepalived     # start the service
    systemctl enable keepalived    # enable it at boot
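
    Optionally, keepalived can also watch the local kube-apiserver, so the VIP moves when the apiserver dies rather than only when the whole host goes down. This is not part of the original configuration; it is a minimal sketch using keepalived's vrrp_script/track_script, to be adapted as needed:

    # add near the top of /etc/keepalived/keepalived.conf on each master
    vrrp_script check_apiserver {
        script "/usr/bin/curl -sfk https://127.0.0.1:6443/healthz -o /dev/null"
        interval 3
        fall 3
        rise 2
        weight -20    # lower the priority when the local apiserver is unhealthy
    }

    # and add inside the existing vrrp_instance VI_1 { ... } block
    track_script {
        check_apiserver
    }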

    Then point the k8sapi entry in every worker's /etc/hosts at the VIP:
    vi /etc/hosts
    172.16.5.230 k8sapi
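
    To confirm the failover actually works, check which master holds the VIP and test the API endpoint from a worker; these are plain verification commands:

    # on each master: the VIP should be bound on exactly one of them
    ip addr show ens33 | grep 172.16.5.230

    # on a worker: the apiserver must answer through the VIP
    curl -k https://k8sapi:6443/healthz && echo

    # simulate a failover: stop keepalived on the current MASTER;
    # the VIP should move to a BACKUP within a few seconds
    systemctl stop keepalived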

    Finally, install the network plugin from any master node:

    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    If that URL is unreachable, save the manifest below and upload it to the server instead:

    ---
    apiVersion: policy/v1beta1
    kind: PodSecurityPolicy
    metadata:
      name: psp.flannel.unprivileged
      annotations:
        seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
        seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
        apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
        apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    spec:
      privileged: false
      volumes:
        - configMap
        - secret
        - emptyDir
        - hostPath
      allowedHostPaths:
        - pathPrefix: "/etc/cni/net.d"
        - pathPrefix: "/etc/kube-flannel"
        - pathPrefix: "/run/flannel"
      readOnlyRootFilesystem: false
      # Users and groups
      runAsUser:
        rule: RunAsAny
      supplementalGroups:
        rule: RunAsAny
      fsGroup:
        rule: RunAsAny
      # Privilege Escalation
      allowPrivilegeEscalation: false
      defaultAllowPrivilegeEscalation: false
      # Capabilities
      allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
      defaultAddCapabilities: []
      requiredDropCapabilities: []
      # Host namespaces
      hostPID: false
      hostIPC: false
      hostNetwork: true
      hostPorts:
      - min: 0
        max: 65535
      # SELinux
      seLinux:
        # SELinux is unused in CaaSP
        rule: 'RunAsAny'
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: flannel
    rules:
      - apiGroups: ['extensions']
        resources: ['podsecuritypolicies']
        verbs: ['use']
        resourceNames: ['psp.flannel.unprivileged']
      - apiGroups:
          - ""
        resources:
          - pods
        verbs:
          - get
      - apiGroups:
          - ""
        resources:
          - nodes
        verbs:
          - list
          - watch
      - apiGroups:
          - ""
        resources:
          - nodes/status
        verbs:
          - patch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: flannel
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: flannel
    subjects:
    - kind: ServiceAccount
      name: flannel
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: flannel
      namespace: kube-system
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: kube-flannel-cfg
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    data:
      cni-conf.json: |
        {
          "name": "cbr0",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "flannel",
              "delegate": {
                "hairpinMode": true,
                "isDefaultGateway": true
              }
            },
            {
              "type": "portmap",
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      net-conf.json: |
        {
          "Network": "10.244.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds-amd64
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/os
                        operator: In
                        values:
                          - linux
                      - key: kubernetes.io/arch
                        operator: In
                        values:
                          - amd64
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            image: quay.io/coreos/flannel:v0.12.0-amd64
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.12.0-amd64
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds-arm64
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/os
                        operator: In
                        values:
                          - linux
                      - key: kubernetes.io/arch
                        operator: In
                        values:
                          - arm64
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            image: quay.io/coreos/flannel:v0.12.0-arm64
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.12.0-arm64
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                  add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds-arm
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/os
                        operator: In
                        values:
                          - linux
                      - key: kubernetes.io/arch
                        operator: In
                        values:
                          - arm
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            image: quay.io/coreos/flannel:v0.12.0-arm
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.12.0-arm
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                  add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds-ppc64le
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/os
                        operator: In
                        values:
                          - linux
                      - key: kubernetes.io/arch
                        operator: In
                        values:
                          - ppc64le
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            image: quay.io/coreos/flannel:v0.12.0-ppc64le
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.12.0-ppc64le
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                  add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds-s390x
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/os
                        operator: In
                        values:
                          - linux
                      - key: kubernetes.io/arch
                        operator: In
                        values:
                          - s390x
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            image: quay.io/coreos/flannel:v0.12.0-s390x
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.12.0-s390x
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                  add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg

    Pull the required flannel image on every node first:
    docker pull quay.io/coreos/flannel:v0.12.0-amd64

    Save the manifest above on the server as kube-flannel.yml, then run:
    kubectl apply -f kube-flannel.yml
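
    Once the manifest is applied, flannel runs as a DaemonSet in kube-system and every node should flip to Ready. A final check from any master:

    kubectl -n kube-system get pods -l app=flannel -o wide
    kubectl get nodes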

    Final result: kubectl get nodes on any master lists all five nodes in the Ready state.

  • Original article: https://www.cnblogs.com/jeanson/p/13601030.html