• Deploying a highly available multi-master Kubernetes cluster (1.17.3)


    1) Deployment environment

    2) Initialize the lab environment

    1. Configure the yum repos (run on every node; skip this step in cloud environments)

    - Back up the original yum repo file
    mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
    - Download the Aliyun yum repo file
    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    - Build the new yum cache
    yum makecache fast
    - Add the yum repo needed to install Kubernetes
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    EOF
    - Clean the yum cache
    yum clean all
    - Rebuild the yum cache
    yum makecache fast
    - Update the packages
    yum -y update
    - Install prerequisite packages
    yum -y install yum-utils device-mapper-persistent-data lvm2
    - Add the Docker CE repo
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum clean all
    yum makecache fast
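    A quick sanity check (optional, not part of the original notes) that the new repos are active:

    yum repolist enabled | grep -Ei 'kubernetes|docker-ce'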

    2. Install the base software packages (run on every node)

    yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate

    3. Disable the firewalld firewall (run on every node)

    systemctl disable firewalld
    
    systemctl stop firewalld

    4. Install iptables (run on every node)

    yum install iptables-services -y
    
    service iptables stop && systemctl disable iptables
    
    /usr/sbin/iptables -P FORWARD ACCEPT
    
    /usr/sbin/iptables -X
    
    /usr/sbin/iptables -F -t nat
    
    /usr/sbin/iptables -X -t nat

    5. Time synchronization (run on every node)

    ntpdate cn.pool.ntp.org
    
    crontab -e
    
    0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org

    6. Disable SELinux (run on every node)

    Edit /etc/sysconfig/selinux and /etc/selinux/config:
    
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
    
    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    
    setenforce 0

    7. Disable the swap partition (all nodes)

    swapoff -a
    
    # To disable swap permanently, comment out the swap line in /etc/fstab:
    
    sed -i 's/.*swap.*/#&/' /etc/fstab
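    To confirm swap is really off (optional check):

    swapon --show    # prints nothing when no swap is active
    free -m          # the Swap line should show 0 total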

    8. Tune kernel parameters (run on every node)

    echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
    
    echo 1 >/proc/sys/net/bridge/bridge-nf-call-ip6tables
    
    cat > /etc/sysctl.d/k8s.conf <<-EOF
    net.bridge.bridge-nf-call-iptables=1
    net.bridge.bridge-nf-call-ip6tables=1
    net.ipv4.ip_forward=1
    vm.swappiness=0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    fs.inotify.max_user_instances=8192
    fs.inotify.max_user_watches=1048576
    fs.file-max=52706963
    fs.nr_open=52706963
    net.ipv6.conf.all.disable_ipv6=1
    net.netfilter.nf_conntrack_max=2310720
    EOF
    
    sysctl --system
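    A quick spot check (optional) that the new settings are live:

    sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
    # both values should be reported as 1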

    9. Load the ipvs kernel modules

    cat > /etc/sysconfig/modules/br_netfilter.modules << EOF
    modprobe br_netfilter
    EOF
    chmod 755 /etc/sysconfig/modules/br_netfilter.modules
    ## Load the ipvs modules
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

    10. Configure limits.conf

    [ $(cat /etc/security/limits.conf|grep '* soft nproc 10240000'|wc -l) -eq 0 ] && echo '* soft nproc 10240000' >>/etc/security/limits.conf
    
    [ $(cat /etc/security/limits.conf|grep '* hard nproc 10240000'|wc -l) -eq 0 ] && echo '* hard nproc 10240000' >>/etc/security/limits.conf
    [ $(cat /etc/security/limits.conf|grep '* soft nofile 10240000'|wc -l) -eq 0 ] && echo '* soft nofile 10240000' >>/etc/security/limits.conf
    [ $(cat /etc/security/limits.conf|grep '* hard nofile 10240000'|wc -l) -eq 0 ] && echo '* hard nofile 10240000' >>/etc/security/limits.conf
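    These limits only take effect for new login sessions; after logging in again they can be verified (optional check):

    ulimit -n    # max open files, should now report 10240000
    ulimit -u    # max user processes, should now report 10240000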


    11. Install ansible (on the 192.168.31.72 node)

    Configure passwordless SSH from master01 to every other node:
    
    yum -y install ansible
    ssh-keygen -t rsa
    for i in 70 71 72 73 75 82 83 84 86 67 68 69 64 ;do ssh-copy-id -i .ssh/id_rsa.pub root@192.168.31.$i;done
    [root@harbor-ansible ~]# cat /etc/ansible/hosts
    [haproxy]
    192.168.31.70
    192.168.31.71
    [k8s-master]
    192.168.31.72
    192.168.31.73
    192.168.31.75
    [k8s-worker]
    192.168.31.82
    192.168.31.83
    192.168.31.84
    192.168.31.86
    192.168.31.67
    192.168.31.68
    192.168.31.69
    192.168.31.64
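    With the inventory in place, a quick connectivity test (optional) confirms ansible can reach every node:

    ansible all -m ping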

    12. Configure the hosts file on master01 and push it to every node with ansible

    Add the following lines to /etc/hosts:
    
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.31.72 master01.ziioffice.com master01
    192.168.31.73 master02.ziioffice.com master02
    192.168.31.75 master03.ziioffice.com master03
    192.168.31.82 worker01.ziioffice.com worker01
    192.168.31.83 worker02.ziioffice.com worker02
    192.168.31.84 worker03.ziioffice.com worker03
    192.168.31.86 worker04.ziioffice.com worker04
    192.168.31.67 worker05.ziioffice.com worker05
    192.168.31.68 worker06.ziioffice.com worker06
    192.168.31.64 worker07.ziioffice.com worker07
    192.168.31.69 worker08.ziioffice.com worker08
    # Push the hosts file to the other nodes
    ansible all -m copy -a 'src=./hosts dest=/etc/hosts'

    3) Deploy highly available keepalived + haproxy

    # Build and install haproxy
    
    mkdir -p /opt/apply
    cd /usr/local/src/
    wget http://download.51yuki.cn/haproxy-1.8.17.tar.gz
    tar xvf haproxy-1.8.17.tar.gz
    cd haproxy-1.8.17/
    yum install gcc pcre pcre-devel openssl openssl-devel -y
    make TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/opt/apply/haproxy
    cd /usr/local/src/haproxy-1.8.17/contrib/systemd
    make install PREFIX=/opt/apply/haproxy
    # Prepare the haproxy init script
    cat > /etc/init.d/haproxy <<'EOF'
    #!/bin/bash
    #
    # chkconfig: - 85 15
    # description: HA-Proxy is a TCP/HTTP reverse proxy which is particularly suited
    #              for high availability environments.
    # processname: haproxy
    # config: /etc/haproxy/haproxy.cfg
    # pidfile: /var/run/haproxy.pid
    # Script Author: Simon Matter <simon.matter@invoca.ch>
    # Version: 2004060600

    # Source function library.
    if [ -f /etc/init.d/functions ]; then
      . /etc/init.d/functions
    elif [ -f /etc/rc.d/init.d/functions ] ; then
      . /etc/rc.d/init.d/functions
    else
      exit 0
    fi

    # Source networking configuration.
    . /etc/sysconfig/network

    # Check that networking is up.
    #[ ${NETWORKING} == "no" ] && exit 0

    # This is our service name
    BASENAME=`basename $0`
    if [ -L $0 ]; then
      BASENAME=`find $0 -name $BASENAME -printf %l`
      BASENAME=`basename $BASENAME`
    fi

    BIN=/opt/apply/haproxy/sbin/haproxy
    CFG=/etc/haproxy/haproxy.cfg
    [ -f $CFG ] || exit 1
    PIDFILE=/var/run/haproxy.pid
    LOCKFILE=/var/lock/subsys/haproxy
    RETVAL=0

    start() {
      quiet_check
      if [ $? -ne 0 ]; then
        echo "Errors found in configuration file, check it with '$BASENAME check'."
        return 1
      fi
      echo -n "Starting $BASENAME: "
      daemon $BIN -D -f $CFG -p $PIDFILE
      RETVAL=$?
      echo
      [ $RETVAL -eq 0 ] && touch $LOCKFILE
      return $RETVAL
    }

    stop() {
      echo -n "Shutting down $BASENAME: "
      killproc $BASENAME -USR1
      RETVAL=$?
      echo
      [ $RETVAL -eq 0 ] && rm -f $LOCKFILE
      [ $RETVAL -eq 0 ] && rm -f $PIDFILE
      return $RETVAL
    }

    restart() {
      quiet_check
      if [ $? -ne 0 ]; then
        echo "Errors found in configuration file, check it with '$BASENAME check'."
        return 1
      fi
      stop
      start
    }

    reload() {
      if ! [ -s $PIDFILE ]; then
        return 0
      fi
      quiet_check
      if [ $? -ne 0 ]; then
        echo "Errors found in configuration file, check it with '$BASENAME check'."
        return 1
      fi
      $BIN -D -f $CFG -p $PIDFILE -sf $(cat $PIDFILE)
    }

    check() {
      $BIN -c -q -V -f $CFG
    }

    quiet_check() {
      $BIN -c -q -f $CFG
    }

    rhstatus() {
      status $BASENAME
    }

    condrestart() {
      [ -e $LOCKFILE ] && restart || :
    }

    # See how we were called.
    case "$1" in
      start)
        start
        ;;
      stop)
        stop
        ;;
      restart)
        restart
        ;;
      reload)
        reload
        ;;
      condrestart)
        condrestart
        ;;
      status)
        rhstatus
        ;;
      check)
        check
        ;;
      *)
        echo $"Usage: $BASENAME {start|stop|restart|reload|condrestart|status|check}"
        exit 1
    esac

    exit $?
    EOF
    chmod +x /etc/init.d/haproxy
    # Prepare the configuration file
    [root@haproxy01 haproxy-1.8.17]# cat /etc/haproxy/haproxy.cfg
    global
    maxconn 100000
    chroot /opt/apply/haproxy
    uid 99
    gid 99
    daemon
    nbproc 1
    pidfile /opt/apply/haproxy/run/haproxy.pid
    log 127.0.0.1 local3 info
    defaults
    option http-keep-alive
    option forwardfor
    maxconn 100000
    mode http
    timeout connect 300000ms
    timeout client 300000ms
    timeout server 300000ms
    listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth haadmin:q1w2e3r4ys
    # Start haproxy
    systemctl daemon-reload
    systemctl start haproxy
    chkconfig --add haproxy
    chkconfig haproxy on
    # Enable haproxy logging
    yum -y install rsyslog
    vim /etc/rsyslog.conf
    $ModLoad imudp
    $UDPServerRun 514
    local3.* /var/log/haproxy.log
    # Restart rsyslog; the log is written to /var/log/haproxy.log
    systemctl enable rsyslog
    systemctl restart rsyslog
    # Build and install keepalived
    
    yum -y install libnfnetlink-devel libnfnetlink ipvsadm libnl libnl-devel \
        libnl3 libnl3-devel lm_sensors-libs net-snmp-agent-libs net-snmp-libs openssh-server openssh-clients openssl \
        openssl-devel automake iproute
    cd /usr/local/src
    wget https://www.keepalived.org/software/keepalived-1.4.5.tar.gz
    tar xf keepalived-1.4.5.tar.gz
    cd keepalived-1.4.5
    ./configure --prefix=/opt/apply/keepalived --disable-fwmark
    make && make install
    # Copy the config files and startup scripts
    cp /usr/local/src/keepalived-1.4.5/keepalived/etc/init.d/keepalived.rh.init /etc/sysconfig/keepalived.sysconfig
    cp /usr/local/src/keepalived-1.4.5/keepalived/keepalived.service /usr/lib/systemd/system/
    cp /usr/local/src/keepalived-1.4.5/bin/keepalived /usr/sbin/
    Edit the systemd unit file /usr/lib/systemd/system/keepalived.service:
    [Unit]
    Description=LVS and VRRP High Availability Monitor
    After= network-online.target syslog.target
    Wants=network-online.target
    [Service]
    Type=forking
    PIDFile=/var/run/keepalived.pid
    KillMode=process
    EnvironmentFile=-/etc/sysconfig/keepalived
    ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS
    ExecReload=/bin/kill -HUP $MAINPID
    [Install]
    WantedBy=multi-user.target
    # Start keepalived
    systemctl daemon-reload
    systemctl enable keepalived.service
    systemctl restart keepalived.service
    Example: configure high availability for the master nodes
    1) Configure haproxy (on both haproxy nodes)
    global
    maxconn 100000
    chroot /opt/apply/haproxy
    uid 99
    gid 99
    daemon
    nbproc 1
    pidfile /opt/apply/haproxy/run/haproxy.pid
    log 127.0.0.1 local3 info
    defaults
    option http-keep-alive
    option forwardfor
    maxconn 100000
    mode http
    timeout connect 300000ms
    timeout client 300000ms
    timeout server 300000ms
    listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth haadmin:q1w2e3r4ys
    listen k8s-api-server-6443
    bind 192.168.31.80:6443
    mode tcp
    server k8s-master01-72 192.168.31.72:6443 check inter 2000 fall 3 rise 5
    server k8s-master02-73 192.168.31.73:6443 check inter 2000 fall 3 rise 5
    server k8s-master03-75 192.168.31.75:6443 check inter 2000 fall 3 rise 5
    # Configure keepalived
    [root@haproxy01 apply]# vim /etc/keepalived/keepalived.conf
    global_defs {
    notification_email {
    xionghaihua@distrii.com
    }
    notification_email_from it@distrii.com
    smtp_server smtp.distrii.com
    smtp_connect_timeout 30
    router_id LVS_DEVEL
    }
    vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 80
    priority 100
    advert_int 1
    #unicast_src_ip 192.168.31.70
    #unicast_peer {
    # 192.168.31.71
    #}
    authentication {
    auth_type PASS
    auth_pass Aa123321
    }
    virtual_ipaddress {
    192.168.31.80 dev eth0 label eth0:0
    }
    }
    [root@haproxy02 apply]# vim /etc/keepalived/keepalived.conf
    global_defs {
    notification_email {
    xionghaihua@distrii.com
    }
    notification_email_from it@distrii.com
    smtp_server smtp.distrii.com
    smtp_connect_timeout 3
    router_id LVS_DEVEL
    }
    vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 80
    priority 98
    advert_int 1
    #unicast_src_ip 192.168.31.71
    #unicast_peer {
    # 192.168.31.70
    #}
    authentication {
    auth_type PASS
    auth_pass Aa123321
    }
    virtual_ipaddress {
    192.168.31.80 dev eth0 label eth0:0
    }
    }
    systemctl restart haproxy.service
    systemctl restart keepalived.service
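    As configured above the VIP only moves when a keepalived node goes down, not when the haproxy process itself dies. If that is also wanted, keepalived supports a vrrp_script health check; a minimal sketch (not part of the original setup, and it assumes the psmisc package provides killall) added to both keepalived.conf files:

    vrrp_script chk_haproxy {
        script "/usr/bin/killall -0 haproxy"   # exit code is non-zero when haproxy is not running
        interval 2
        weight -30                             # drop priority below the backup's 98 on failure
    }
    # and inside vrrp_instance VI_1 { ... }:
    track_script {
        chk_haproxy
    }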

    4) Install the Kubernetes 1.17.3 highly available cluster

    1. Install Docker 19.03 (run on every node)

    # List the available versions

    yum list docker-ce --showduplicates | sort -r
    # Install 19.03.7
    yum install -y docker-ce-19*
    systemctl enable docker && systemctl start docker
    # Edit the Docker daemon configuration
    cat > /etc/docker/daemon.json <<EOF
    {
      "registry-mirrors": ["https://ziqva2l2.mirror.aliyuncs.com"],
      "insecure-registries": ["192.168.31.30"],
      "graph": "/data/docker",
      "exec-opts": ["native.cgroupdriver=systemd"],
      "log-driver": "json-file",
      "log-opts": {
        "max-size": "300m"
      },
      "storage-driver": "overlay2",
      "storage-opts": [
        "overlay2.override_kernel_check=true"
      ]
    }
    EOF
    # Restart Docker
    systemctl daemon-reload && systemctl restart docker
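    Because kubelet expects the systemd cgroup driver, it is worth confirming (optional check) that the daemon.json change took effect:

    docker info 2>/dev/null | grep -i 'cgroup driver'
    # expected: Cgroup Driver: systemd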

    2) Install Kubernetes 1.17.3

    2.1) Run on master01-master03 and worker01-worker08
    
    yum install kubeadm-1.17.3 kubelet-1.17.3 -y
    systemctl enable kubelet
    2.2) Upload the image archives to every k8s node and load them manually
    Link: https://pan.baidu.com/s/10wWe9WNBspZaCSL_zspzpQ
    Extraction code: 4k07
    
    docker load -i kube-apiserver.tar.gz
    docker load -i kube-scheduler.tar.gz
    docker load -i kube-controller-manager.tar.gz
    docker load -i pause.tar.gz
    docker load -i cordns.tar.gz
    docker load -i etcd.tar.gz
    docker load -i kube-proxy.tar.gz
    docker load -i cni.tar.gz
    docker load -i calico-node.tar.gz
    docker load -i kubernetes-dashboard_1_10.tar.gz
    docker load -i metrics-server-amd64_0_3_1.tar.gz
    docker load -i addon.tar.gz
    docker load -i traefik_1_7_9.tar.gz
    docker load -i dashboard_2_0_0.tar.gz
    docker load -i metrics-scrapter-1-0-1.tar.gz
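    If all of the archives sit in one directory, a short loop loads them in one go (a convenience sketch; the directory name below is only an example):

    cd /root/k8s-images    # wherever the tarballs were uploaded
    for img in *.tar.gz; do docker load -i "$img"; done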

    2.3) Initialize the k8s cluster on master01 (run on master01)

    [root@master01 k8s]# cat /data/k8s/kubeadm-config.yaml
    
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: v1.17.3
    controlPlaneEndpoint: "192.168.31.80:6443"
    apiServer:
      certSANs:
      - 192.168.31.72
      - 192.168.31.73
      - 192.168.31.75
      - 192.168.31.80
      - 192.168.31.82
      - 192.168.31.83
      - 192.168.31.84
      - 192.168.31.86
      - 192.168.31.69
      - 192.168.31.64
      - 192.168.31.67
      - 192.168.31.68
    networking:
      podSubnet: "10.241.0.0/16"
      dnsDomain: "ziioffice.cn"
      serviceSubnet: "10.96.0.0/12"
    #imageRepository: registry.aliyuncs.com/kubernetes
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    # Initialize the k8s cluster
    kubeadm init --config kubeadm-config.yaml
    Your Kubernetes control-plane has initialized successfully!
    To start using your cluster, you need to run the following as a regular user:
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
    https://kubernetes.io/docs/concepts/cluster-administration/addons/
    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:
    kubeadm join 192.168.31.80:6443 --token hknex2.0fo9d2ewerpucqde \
    --discovery-token-ca-cert-hash sha256:12487231d2fa223ca4708359ca5f32db238337c024b8caf083d8c0956e640afc \
    --control-plane
    Then you can join any number of worker nodes by running the following on each as root:
    kubeadm join 192.168.31.80:6443 --token hknex2.0fo9d2ewerpucqde \
    --discovery-token-ca-cert-hash sha256:12487231d2fa223ca4708359ca5f32db238337c024b8caf083d8c0956e640afc

    2.4) Run the following on master01 so that kubectl has permission to manage cluster resources

    mkdir -p $HOME/.kube
    
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config

    2.5) Install the Calico network plugin

    # Calico Version v3.5.3
    # https://docs.projectcalico.org/v3.5/releases#v3.5.3
    # This manifest includes the following component versions:
    # calico/node:v3.5.3
    # calico/cni:v3.5.3

    # This ConfigMap is used to configure a self-hosted Calico installation.
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: calico-config
      namespace: kube-system
    data:
      # Typha is disabled.
      typha_service_name: "none"
      # Configure the Calico backend to use.
      calico_backend: "bird"

      # Configure the MTU to use
      veth_mtu: "1440"

      # The CNI network configuration to install on each node. The special
      # values in this config will be automatically populated.
      cni_network_config: |-
        {
          "name": "k8s-pod-network",
          "cniVersion": "0.3.0",
          "plugins": [
            {
              "type": "calico",
              "log_level": "info",
              "datastore_type": "kubernetes",
              "nodename": "__KUBERNETES_NODE_NAME__",
              "mtu": __CNI_MTU__,
              "ipam": {
                "type": "host-local",
                "subnet": "usePodCidr"
              },
              "policy": {
                "type": "k8s"
              },
              "kubernetes": {
                "kubeconfig": "__KUBECONFIG_FILEPATH__"
              }
            },
            {
              "type": "portmap",
              "snat": true,
              "capabilities": {"portMappings": true}
            }
          ]
        }

    ---

    # This manifest installs the calico/node container, as well
    # as the Calico CNI plugins and network config on
    # each master and worker node in a Kubernetes cluster.
    kind: DaemonSet
    apiVersion: apps/v1
    metadata:
      name: calico-node
      namespace: kube-system
      labels:
        k8s-app: calico-node
    spec:
      selector:
        matchLabels:
          k8s-app: calico-node
      updateStrategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      template:
        metadata:
          labels:
            k8s-app: calico-node
          annotations:
            # This, along with the CriticalAddonsOnly toleration below,
            # marks the pod as a critical add-on, ensuring it gets
            # priority scheduling and that its resources are reserved
            # if it ever gets evicted.
            scheduler.alpha.kubernetes.io/critical-pod: ''
        spec:
          nodeSelector:
            beta.kubernetes.io/os: linux
          hostNetwork: true
          tolerations:
            # Make sure calico-node gets scheduled on all nodes.
            - effect: NoSchedule
              operator: Exists
            # Mark the pod as a critical add-on for rescheduling.
            - key: CriticalAddonsOnly
              operator: Exists
            - effect: NoExecute
              operator: Exists
          serviceAccountName: calico-node
          # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
          # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
          terminationGracePeriodSeconds: 0
          initContainers:
            # This container installs the Calico CNI binaries
            # and CNI network config file on each node.
            - name: install-cni
              image: quay.io/calico/cni:v3.5.3
              command: ["/install-cni.sh"]
              env:
                # Name of the CNI config file to create.
                - name: CNI_CONF_NAME
                  value: "10-calico.conflist"
                # The CNI network config to install on each node.
                - name: CNI_NETWORK_CONFIG
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: cni_network_config
                # Set the hostname based on the k8s node name.
                - name: KUBERNETES_NODE_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                # CNI MTU Config variable
                - name: CNI_MTU
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: veth_mtu
                # Prevents the container from sleeping forever.
                - name: SLEEP
                  value: "false"
              volumeMounts:
                - mountPath: /host/opt/cni/bin
                  name: cni-bin-dir
                - mountPath: /host/etc/cni/net.d
                  name: cni-net-dir
          containers:
            # Runs calico/node container on each Kubernetes node. This
            # container programs network policy and routes on each
            # host.
            - name: calico-node
              image: quay.io/calico/node:v3.5.3
              env:
                # Use Kubernetes API as the backing datastore.
                - name: DATASTORE_TYPE
                  value: "kubernetes"
                # Wait for the datastore.
                - name: WAIT_FOR_DATASTORE
                  value: "true"
                # Set based on the k8s node name.
                - name: NODENAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                # Choose the backend to use.
                - name: CALICO_NETWORKING_BACKEND
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: calico_backend
                # Cluster type to identify the deployment type
                - name: CLUSTER_TYPE
                  value: "k8s,bgp"
                # Auto-detect the BGP IP address.
                - name: IP
                  value: "autodetect"
                - name: IP_AUTODETECTION_METHOD
                  value: "can-reach=192.168.31.82"
                # Enable IPIP
                - name: CALICO_IPV4POOL_IPIP
                  value: "Always"
                # Set MTU for tunnel device used if ipip is enabled
                - name: FELIX_IPINIPMTU
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: veth_mtu
                # The default IPv4 pool to create on startup if none exists. Pod IPs will be
                # chosen from this range. Changing this value after installation will have
                # no effect. This should fall within `--cluster-cidr`.
                - name: CALICO_IPV4POOL_CIDR
                  value: "10.241.0.0/16"
                # Disable file logging so `kubectl logs` works.
                - name: CALICO_DISABLE_FILE_LOGGING
                  value: "true"
                # Set Felix endpoint to host default action to ACCEPT.
                - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
                  value: "ACCEPT"
                # Disable IPv6 on Kubernetes.
                - name: FELIX_IPV6SUPPORT
                  value: "false"
                # Set Felix logging to "info"
                - name: FELIX_LOGSEVERITYSCREEN
                  value: "info"
                - name: FELIX_HEALTHENABLED
                  value: "true"
              securityContext:
                privileged: true
              resources:
                requests:
                  cpu: 250m
              livenessProbe:
                httpGet:
                  path: /liveness
                  port: 9099
                  host: localhost
                periodSeconds: 10
                initialDelaySeconds: 10
                failureThreshold: 6
              readinessProbe:
                exec:
                  command:
                  - /bin/calico-node
                  - -bird-ready
                  - -felix-ready
                periodSeconds: 10
              volumeMounts:
                - mountPath: /lib/modules
                  name: lib-modules
                  readOnly: true
                - mountPath: /run/xtables.lock
                  name: xtables-lock
                  readOnly: false
                - mountPath: /var/run/calico
                  name: var-run-calico
                  readOnly: false
                - mountPath: /var/lib/calico
                  name: var-lib-calico
                  readOnly: false
          volumes:
            # Used by calico/node.
            - name: lib-modules
              hostPath:
                path: /lib/modules
            - name: var-run-calico
              hostPath:
                path: /var/run/calico
            - name: var-lib-calico
              hostPath:
                path: /var/lib/calico
            - name: xtables-lock
              hostPath:
                path: /run/xtables.lock
                type: FileOrCreate
            # Used to install CNI.
            - name: cni-bin-dir
              hostPath:
                path: /opt/cni/bin
            - name: cni-net-dir
              hostPath:
                path: /etc/cni/net.d
    ---

    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: calico-node
      namespace: kube-system

    ---
    # Create all the CustomResourceDefinitions needed for
    # Calico policy and networking mode.

    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: felixconfigurations.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: FelixConfiguration
        plural: felixconfigurations
        singular: felixconfiguration
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: bgppeers.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: BGPPeer
        plural: bgppeers
        singular: bgppeer
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: bgpconfigurations.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: BGPConfiguration
        plural: bgpconfigurations
        singular: bgpconfiguration
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: ippools.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: IPPool
        plural: ippools
        singular: ippool
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: hostendpoints.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: HostEndpoint
        plural: hostendpoints
        singular: hostendpoint
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: clusterinformations.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: ClusterInformation
        plural: clusterinformations
        singular: clusterinformation
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: globalnetworkpolicies.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: GlobalNetworkPolicy
        plural: globalnetworkpolicies
        singular: globalnetworkpolicy
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: globalnetworksets.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: GlobalNetworkSet
        plural: globalnetworksets
        singular: globalnetworkset
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: networkpolicies.crd.projectcalico.org
    spec:
      scope: Namespaced
      group: crd.projectcalico.org
      version: v1
      names:
        kind: NetworkPolicy
        plural: networkpolicies
        singular: networkpolicy
    ---

    # Include a clusterrole for the calico-node DaemonSet,
    # and bind it to the calico-node serviceaccount.
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: calico-node
    rules:
      # The CNI plugin needs to get pods, nodes, and namespaces.
      - apiGroups: [""]
        resources:
          - pods
          - nodes
          - namespaces
        verbs:
          - get
      - apiGroups: [""]
        resources:
          - endpoints
          - services
        verbs:
          # Used to discover service IPs for advertisement.
          - watch
          - list
          # Used to discover Typhas.
          - get
      - apiGroups: [""]
        resources:
          - nodes/status
        verbs:
          # Needed for clearing NodeNetworkUnavailable flag.
          - patch
          # Calico stores some configuration information in node annotations.
          - update
      # Watch for changes to Kubernetes NetworkPolicies.
      - apiGroups: ["networking.k8s.io"]
        resources:
          - networkpolicies
        verbs:
          - watch
          - list
      # Used by Calico for policy information.
      - apiGroups: [""]
        resources:
          - pods
          - namespaces
          - serviceaccounts
        verbs:
          - list
          - watch
      # The CNI plugin patches pods/status.
      - apiGroups: [""]
        resources:
          - pods/status
        verbs:
          - patch
      # Calico monitors various CRDs for config.
      - apiGroups: ["crd.projectcalico.org"]
        resources:
          - globalfelixconfigs
          - felixconfigurations
          - bgppeers
          - globalbgpconfigs
          - bgpconfigurations
          - ippools
          - globalnetworkpolicies
          - globalnetworksets
          - networkpolicies
          - clusterinformations
          - hostendpoints
        verbs:
          - get
          - list
          - watch
      # Calico must create and update some CRDs on startup.
      - apiGroups: ["crd.projectcalico.org"]
        resources:
          - ippools
          - felixconfigurations
          - clusterinformations
        verbs:
          - create
          - update
      # Calico stores some configuration information on the node.
      - apiGroups: [""]
        resources:
          - nodes
        verbs:
          - get
          - list
          - watch
      # These permissions are only required for upgrade from v2.6, and can
      # be removed after upgrade or on fresh installations.
      - apiGroups: ["crd.projectcalico.org"]
        resources:
          - bgpconfigurations
          - bgppeers
        verbs:
          - create
          - update
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
      name: calico-node
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: calico-node
    subjects:
    - kind: ServiceAccount
      name: calico-node
      namespace: kube-system
    ---

    # Apply the manifest

    kubectl apply -f calico.yaml
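    After the manifest is applied, the calico pods should reach Running and the nodes should turn Ready (optional check, not part of the original notes):

    kubectl get pods -n kube-system -o wide | grep calico
    kubectl get nodes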

    2.6) Copy the certificates from master01 to master02 and master03

    # On master02 and master03, create the target directories first
    cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/

    # Copy the certificates from master01
    scp /etc/kubernetes/pki/ca.crt master02:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/ca.key master02:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.key master02:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub master02:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.crt master02:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.key master02:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.crt master02:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key master02:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/ca.crt master03:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/ca.key master03:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.key master03:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub master03:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.crt master03:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.key master03:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.crt master03:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key master03:/etc/kubernetes/pki/etcd/
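    As an aside (not used in this walkthrough), kubeadm can also distribute these certificates for you: uploading them as an encrypted secret on master01 prints a certificate key, and joining the other masters with that key makes the manual scp step unnecessary. A sketch, with the token and hash from the init output:

    # On master01
    kubeadm init phase upload-certs --upload-certs
    # On master02/master03
    kubeadm join 192.168.31.80:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>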

    2.7) After the certificates have been copied, run the following on master02 and master03 to join them to the control plane

    --control-plane: this flag tells kubeadm that the joining node becomes a master (control-plane) node

    # Join the master nodes

    kubeadm join 192.168.31.80:6443 --token hknex2.0fo9d2ewerpucqde \
    --discovery-token-ca-cert-hash sha256:12487231d2fa223ca4708359ca5f32db238337c024b8caf083d8c0956e640afc \
    --control-plane
    # On master02 and master03:
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    # Verify with kubectl get nodes:
    [root@master01 ~]# kubectl get node
    NAME                     STATUS   ROLES    AGE   VERSION
    master01.ziioffice.com   Ready    master   12m   v1.17.3
    master02.ziioffice.com   Ready    master   63s   v1.17.3
    master03.ziioffice.com   Ready    master   53s   v1.17.3

    2.8) Join the worker nodes

    kubeadm join 192.168.31.80:6443 --token hknex2.0fo9d2ewerpucqde \
    --discovery-token-ca-cert-hash sha256:12487231d2fa223ca4708359ca5f32db238337c024b8caf083d8c0956e640afc
    # Verify on a master node
    [root@master01 ~]# kubectl get nodes
    NAME                     STATUS   ROLES    AGE   VERSION
    master01.ziioffice.com   Ready    master   83d   v1.17.3
    master02.ziioffice.com   Ready    master   83d   v1.17.3
    master03.ziioffice.com   Ready    master   83d   v1.17.3
    worker01.ziioffice.com   Ready    <none>   83d   v1.17.3
    worker02.ziioffice.com   Ready    <none>   83d   v1.17.3
    worker03.ziioffice.com   Ready    <none>   83d   v1.17.3
    worker04.ziioffice.com   Ready    <none>   82d   v1.17.3
    worker05.ziioffice.com   Ready    <none>   49d   v1.17.3
    worker06.ziioffice.com   Ready    <none>   49d   v1.17.3
    worker07.ziioffice.com   Ready    <none>   47d   v1.17.3
    worker08.ziioffice.com   Ready    <none>   47d   v1.17.3
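    The workers show <none> in the ROLES column because kubeadm only labels control-plane nodes. This is purely cosmetic, but the role can be set by hand if desired, for example:

    kubectl label node worker01.ziioffice.com node-role.kubernetes.io/worker=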

    5) Install and deploy the add-on plugins

    1) Install Dashboard 2.0

    1.1) Load the images

    docker load -i dashboard_2_0_0.tar.gz
    
    docker load -i metrics-scrapter-1-0-1.tar.gz

    1.2) Write the yaml file

    # Copyright 2017 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    apiVersion: v1
    kind: Namespace
    metadata:
      name: kubernetes-dashboard
    
    ---
    
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    
    ---
    
    kind: Service
    apiVersion: v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    spec:
      type: NodePort
      ports:
        - port: 443
          targetPort: 8443
          nodePort: 30982
      selector:
        k8s-app: kubernetes-dashboard
    
    ---
    
    apiVersion: v1
    kind: Secret
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard-certs
      namespace: kubernetes-dashboard
    type: Opaque
    
    ---
    
    apiVersion: v1
    kind: Secret
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard-csrf
      namespace: kubernetes-dashboard
    type: Opaque
    data:
      csrf: ""
    
    ---
    
    apiVersion: v1
    kind: Secret
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard-key-holder
      namespace: kubernetes-dashboard
    type: Opaque
    
    ---
    
    kind: ConfigMap
    apiVersion: v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard-settings
      namespace: kubernetes-dashboard
    
    ---
    
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    rules:
      # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
      - apiGroups: [""]
        resources: ["secrets"]
        resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
        verbs: ["get", "update", "delete"]
        # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
      - apiGroups: [""]
        resources: ["configmaps"]
        resourceNames: ["kubernetes-dashboard-settings"]
        verbs: ["get", "update"]
        # Allow Dashboard to get metrics.
      - apiGroups: [""]
        resources: ["services"]
        resourceNames: ["heapster", "dashboard-metrics-scraper"]
        verbs: ["proxy"]
      - apiGroups: [""]
        resources: ["services/proxy"]
        resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
        verbs: ["get"]
    
    ---
    
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
    rules:
      # Allow Metrics Scraper to get metrics from the Metrics server
      - apiGroups: ["metrics.k8s.io"]
        resources: ["pods", "nodes"]
        verbs: ["get", "list", "watch"]
    
    ---
    
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: kubernetes-dashboard
    subjects:
      - kind: ServiceAccount
        name: kubernetes-dashboard
        namespace: kubernetes-dashboard
    
    ---
    
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: kubernetes-dashboard
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: kubernetes-dashboard
    subjects:
      - kind: ServiceAccount
        name: kubernetes-dashboard
        namespace: kubernetes-dashboard
    
    ---
    
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    spec:
      replicas: 1
      revisionHistoryLimit: 10
      selector:
        matchLabels:
          k8s-app: kubernetes-dashboard
      template:
        metadata:
          labels:
            k8s-app: kubernetes-dashboard
        spec:
          containers:
            - name: kubernetes-dashboard
              image: kubernetesui/dashboard:v2.0.0-beta8
              imagePullPolicy: Always
              ports:
                - containerPort: 8443
                  protocol: TCP
              args:
                - --auto-generate-certificates
                - --namespace=kubernetes-dashboard
                # Uncomment the following line to manually specify Kubernetes API server Host
                # If not specified, Dashboard will attempt to auto discover the API server and connect
                # to it. Uncomment only if the default does not work.
                # - --apiserver-host=http://my-address:port
              volumeMounts:
                - name: kubernetes-dashboard-certs
                  mountPath: /certs
                  # Create on-disk volume to store exec logs
                - mountPath: /tmp
                  name: tmp-volume
              livenessProbe:
                httpGet:
                  scheme: HTTPS
                  path: /
                  port: 8443
                initialDelaySeconds: 30
                timeoutSeconds: 30
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
                runAsUser: 1001
                runAsGroup: 2001
          volumes:
            - name: kubernetes-dashboard-certs
              secret:
                secretName: kubernetes-dashboard-certs
            - name: tmp-volume
              emptyDir: {}
          serviceAccountName: kubernetes-dashboard
          nodeSelector:
            "beta.kubernetes.io/os": linux
          # Comment the following tolerations if Dashboard must not be deployed on master
          tolerations:
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
    
    ---
    
    kind: Service
    apiVersion: v1
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      name: dashboard-metrics-scraper
      namespace: kubernetes-dashboard
    spec:
      ports:
        - port: 8000
          targetPort: 8000
      selector:
        k8s-app: dashboard-metrics-scraper
    
    ---
    
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      name: dashboard-metrics-scraper
      namespace: kubernetes-dashboard
    spec:
      replicas: 1
      revisionHistoryLimit: 10
      selector:
        matchLabels:
          k8s-app: dashboard-metrics-scraper
      template:
        metadata:
          labels:
            k8s-app: dashboard-metrics-scraper
          annotations:
            seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
        spec:
          containers:
            - name: dashboard-metrics-scraper
              image: kubernetesui/metrics-scraper:v1.0.1
              ports:
                - containerPort: 8000
                  protocol: TCP
              livenessProbe:
                httpGet:
                  scheme: HTTP
                  path: /
                  port: 8000
                initialDelaySeconds: 30
                timeoutSeconds: 30
              volumeMounts:
              - mountPath: /tmp
                name: tmp-volume
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
                runAsUser: 1001
                runAsGroup: 2001
          serviceAccountName: kubernetes-dashboard
          nodeSelector:
            "beta.kubernetes.io/os": linux
          # Comment the following tolerations if Dashboard must not be deployed on master
          tolerations:
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
          volumes:
            - name: tmp-volume
              emptyDir: {}

    1.3) Deploy and verify

    # Deploy
    kubectl apply -f kubernetes-dashboard.yaml
    [root@master01 kubernetes-1.17.3]# kubectl get pods -n kubernetes-dashboard
    NAME                                         READY   STATUS    RESTARTS   AGE
    dashboard-metrics-scraper-6b77d4fbb9-bv4nh   1/1     Running   0          13d
    kubernetes-dashboard-5996555fd8-t94zn        1/1     Running   0          14d
    [root@master01 kubernetes-1.17.3]# kubectl get svc -n kubernetes-dashboard
    NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
    dashboard-metrics-scraper   ClusterIP   10.102.135.99   <none>        8000/TCP        83d
    kubernetes-dashboard        NodePort    10.102.42.5     <none>        443:30982/TCP   83d
    Access URL: https://192.168.31.72:30982/

    1.4) Create an administrator token

    1.4.1) Log in with a token
    # Create an admin binding; this token can then view every namespace
    kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
    kubectl get secret -n kubernetes-dashboard
    kubectl describe secret kubernetes-dashboard-token-jxds4 -n kubernetes-dashboard
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6IndMbWR0S3I5S1ptZGtLU1dOUHhMWnZXeU95OXlER29RZ2hzZVktQkNoRUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1qeGRzNCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImU3ZTQ3NzhkLTE5YTgtNGJkNi1iMmRlLTNiZmIxYjM3NGVjYSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.VBhTKHceR2-rD0_HkUj5KA_Nk_mS_k9qsnq5ZJzeezpVUaPFaaSXgp4yCZzOTYyRlgpFduobOusdlJKCM-4qmu7s2h5xNu1fn53LCpRY0Se17Q9pRQbZ8EvlHKraxqGlA9dPcXKQYkS61OSTPTaGPUOuYmEvs_TW7X60P6LH-Vg6uZEIiQCV40lgnwBdO61LUuCZHnEMzDKwtNL25ire0ZQrWfjGG6NDGYF-tlNojImFE_qQcIc6fFidRtqJE1VcYw1REf1iIPAXY5HAHiXfOTRprx9MDRx4_5dpuq8Se2xmTcly05t_7gljGnmxCc4B2Fzf7zqwK-I46-KQdDJN7A

    1.4.2) Log in with a kubeconfig file
    kubectl config set-cluster kubernetes --certificate-authority=./ca.crt --server="https://192.168.31.80:6443" --embed-certs=true --kubeconfig=/root/kube-admin.conf
    ADMIN_TOKEN=$(kubectl get secret kubernetes-dashboard-token-jxds4 -n kubernetes-dashboard -o jsonpath={.data.token}|base64 -d)
    kubectl config set-credentials all-admin --token=$ADMIN_TOKEN --kubeconfig=/root/kube-admin.conf
    kubectl config set-context all-admin@kubernetes --cluster=kubernetes --user=all-admin --kubeconfig=/root/kube-admin.conf
    kubectl config use-context all-admin@kubernetes --kubeconfig=/root/kube-admin.conf
    # Copy the resulting kube-admin.conf to your desktop; in the browser choose kubeconfig authentication, import kube-admin.conf, and log in.

    1.5) Create a token that can only manage a specific namespace

    Create a token for the normal user dev-admin that can only manage the dev namespace
    # Create the namespace and the service account
    kubectl create namespace dev
    kubectl create serviceaccount dev-admin -n dev
    kubectl get secret -n dev|grep dev-admin
    dev-admin-token-bjqv7   kubernetes.io/service-account-token   3      39s
    [root@master01 pki]# kubectl describe secret dev-admin-token-bjqv7 -n dev
    # The output contains the token:
    eyJhbGciOiJSUzI1NiIsImtpZCI6IndMbWR0S3I5S1ptZGtLU1dOUHhMWnZXeU95OXlER29RZ2hzZVktQkNoRUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZXYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoiZGV2LWFkbWluLXRva2VuLWJqcXY3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRldi1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAzYzBiYWM3LTA0ZTctNDYwYi04Y2ExLTIzMmNiNjExZjA4OSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZXY6ZGV2LWFkbWluIn0.MOlHeK6zmJp31vJIkgRRGL0niTMZa2qrdsYmLynXf0yFHJpLbUasPj4b3T5Bp46YRYdMNV65ODK2b1iAEE4li2mU8ZzeyJZdUsNUYdbXO0D6N9IrbcxScep6me6BFAYnSyabN4oOP7S1K1BeitcCs48_1YLbuFCAVRO-jcNYektUPpoigNH2dp8z7cNSU2KbouY2YY_0YZWr1EcyGMrk5X7YiMPKfka-YPhB_CQO93MS2gJVXvuVoOthZukcH8lFybFCPDWtJOolxa-fCXFfL4V8NQ7_daQ39JHFS2UGUXPzUxiNdZvHAlBsG5jwxOCfB8el98SfFpq1SzvbaMqpCQ
    # Grant the service account permissions on the dev namespace
    [root@master01 pki]# kubectl create rolebinding dev-admin --clusterrole=cluster-admin --serviceaccount=dev:dev-admin -n dev
    rolebinding.rbac.authorization.k8s.io/dev-admin created

    # Log in with a kubeconfig file: wrap the token in a kubeconfig so the dashboard can be accessed with a file that only manages the dev namespace. Run on a k8s master node.
    [root@master01 pki]# kubectl get secret -n dev|grep dev-admin
    dev-admin-token-bjqv7   kubernetes.io/service-account-token   3      30m
    cd /etc/kubernetes/pki
    # Create the cluster entry
    kubectl config set-cluster kubernetes --certificate-authority=./ca.crt --server="https://192.168.31.80:6443" --embed-certs=true --kubeconfig=/root/kube-dev.conf
    # Create the credentials
    DEF_NS_ADMIN_TOKEN=$(kubectl get secret dev-admin-token-bjqv7 -n dev -o jsonpath={.data.token}|base64 -d)
    kubectl config set-credentials dev-admin --token=$DEF_NS_ADMIN_TOKEN --kubeconfig=/root/kube-dev.conf
    # Create the context
    kubectl config set-context dev-admin@kubernetes --cluster=kubernetes --user=dev-admin --kubeconfig=/root/kube-dev.conf
    # Switch current-context to dev-admin@kubernetes
    kubectl config use-context dev-admin@kubernetes --kubeconfig=/root/kube-dev.conf
    # Copy kube-dev.conf to your desktop; in the browser choose kubeconfig authentication, import kube-dev.conf, and log in.

    2) Deploy the ingress controller

    2.1) Generate the openssl.cnf file, which predefines the parameters needed to issue a certificate

    echo """
    
    [req]
    distinguished_name = req_distinguished_name
    prompt = yes
    [ req_distinguished_name ]
    countryName = Country Name (2 letter code)
    countryName_value = CN
    stateOrProvinceName = State or Province Name (full name)
    stateOrProvinceName_value = Shanghai
    localityName = Locality Name (eg, city)
    localityName_value = Shanghai
    organizationName = Organization Name (eg, company)
    organizationName_value = Channelsoft
    organizationalUnitName = Organizational Unit Name (eg, section)
    organizationalUnitName_value = R & D Department
    commonName = Common Name (eg, your name or your server's hostname)
    commonName_value = *.ziioffice.com
    emailAddress = Email Address
    emailAddress_value = xionghaihua@distrii.com
    """ > ./openssl.cnf

    2.2) Generate a certificate

    openssl req -newkey rsa:4096 -nodes -config ./openssl.cnf -days 3650 -x509 -out ./tls.crt -keyout ./tls.key
    
    # Create a secret object from the certificate
    kubectl create -n kube-system secret tls ssl --cert ./tls.crt --key ./tls.key

    2.3) Write the traefik.yaml file

    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: traefik-ingress-controller
    rules:
      - apiGroups:
          - ""
        resources:
          - services
          - endpoints
          - secrets
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - extensions
        resources:
          - ingresses
        verbs:
          - get
          - list
          - watch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: traefik-ingress-controller
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: traefik-ingress-controller
    subjects:
    - kind: ServiceAccount
      name: traefik-ingress-controller
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: traefik-ingress-controller
      namespace: kube-system
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: traefik-conf
      namespace: kube-system
    data:
      traefik.toml: |
        insecureSkipVerify = true
        defaultEntryPoints = ["http","https"]
        [entryPoints]
          [entryPoints.http]
          address = ":80"
          [entryPoints.https]
          address = ":443"
            [entryPoints.https.tls]
              [[entryPoints.https.tls.certificates]]
              CertFile = "/ssl/tls.crt"
              KeyFile = "/ssl/tls.key"
    ---
    kind: DaemonSet
    apiVersion: apps/v1
    metadata:
      name: traefik-ingress-controller
      namespace: kube-system
      labels:
        k8s-app: traefik-ingress-lb
    spec:
      selector:
          matchLabels:
            k8s-app: traefik-ingress-lb
            name: traefik-ingress-lb
      template:
        metadata:
          labels:
            k8s-app: traefik-ingress-lb
            name: traefik-ingress-lb
        spec:
          serviceAccountName: traefik-ingress-controller
          tolerations:
          - key: node-role.kubernetes.io/master
            effect: NoSchedule
          terminationGracePeriodSeconds: 60
          hostNetwork: true
          nodeSelector:
             ingress: traefik
          volumes:
          - name: ssl
            secret:
              secretName: ssl
          - name: config
            configMap:
              name: traefik-conf
          containers:
          - image: k8s.gcr.io/traefik:1.7.9
            name: traefik-ingress-lb
            ports:
            - name: http
              containerPort: 80
              hostPort: 80
            - name: admin
              containerPort: 8080
            securityContext:
              privileged: true
            args:
            - --configfile=/config/traefik.toml
            - -d
            - --web
            - --kubernetes
            volumeMounts:
            - mountPath: "/ssl"
              name: "ssl"
            - mountPath: "/config"
              name: "config"
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: traefik-ingress-service
    spec:
      selector:
        k8s-app: traefik-ingress-lb
      ports:
        - protocol: TCP
          port: 80
          name: web
        - protocol: TCP
          port: 8080
          name: admin
        - protocol: TCP
          port: 443
          name: https
      type: NodePort
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: traefik-web-ui
      namespace: kube-system
    spec:
      selector:
        k8s-app: traefik-ingress-lb
      ports:
      - port: 80
        targetPort: 8080
    ---
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: traefik-web-ui
      namespace: kube-system
      annotations:
        kubernetes.io/ingress.class: traefik
    spec:
      rules:
      - host: ingress.ziioffice.com
        http:
          paths:
          - backend:
              serviceName: traefik-web-ui
              servicePort: 80
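    Note that the DaemonSet above only schedules onto nodes carrying the ingress: traefik label (see its nodeSelector), so label the nodes that should run the ingress controller before deploying; which nodes to pick is up to you, for example:

    kubectl label node worker01.ziioffice.com ingress=traefik
    kubectl label node worker02.ziioffice.com ingress=traefik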

    2.4) Deploy and verify

    [root@master01 kubernetes1.17.3]# kubectl apply -f traefik.yaml 
    Verify:
    [root@master01 kubernetes1.17.3]# kubectl get pods -n kube-system|grep traefik
    traefik-ingress-controller-blpwk                 1/1     Running   0          4m10s
    traefik-ingress-controller-v6nz8                 1/1     Running   0          4m10s

    3) Install the metrics-server monitoring plugin

    3.1) Prepare the metrics.yaml file

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: metrics-server:system:auth-delegator
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:auth-delegator
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: metrics-server-auth-reader
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: extension-apiserver-authentication-reader
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: metrics-server
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: system:metrics-server
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - "extensions"
      resources:
      - deployments
      verbs:
      - get
      - list
      - update
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: system:metrics-server
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:metrics-server
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: metrics-server-config
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: EnsureExists
    data:
      NannyConfiguration: |-
        apiVersion: nannyconfig/v1alpha1
        kind: NannyConfiguration
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: metrics-server
      namespace: kube-system
      labels:
        k8s-app: metrics-server
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        version: v0.3.1
    spec:
      selector:
        matchLabels:
          k8s-app: metrics-server
          version: v0.3.1
      template:
        metadata:
          name: metrics-server
          labels:
            k8s-app: metrics-server
            version: v0.3.1
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
            seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
        spec:
          priorityClassName: system-cluster-critical
          serviceAccountName: metrics-server
          containers:
          - name: metrics-server
            image: k8s.gcr.io/metrics-server-amd64:v0.3.1
            command:
            - /metrics-server
            - --metric-resolution=30s
            - --kubelet-preferred-address-types=InternalIP
            - --kubelet-insecure-tls
            ports:
            - containerPort: 443
              name: https
              protocol: TCP
          - name: metrics-server-nanny
            image: k8s.gcr.io/addon-resizer:1.8.4
            resources:
              limits:
                cpu: 100m
                memory: 300Mi
              requests:
                cpu: 5m
                memory: 50Mi
            env:
              - name: MY_POD_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.name
              - name: MY_POD_NAMESPACE
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.namespace
            volumeMounts:
            - name: metrics-server-config-volume
              mountPath: /etc/config
            command:
              - /pod_nanny
              - --config-dir=/etc/config
              - --cpu=300m
              - --extra-cpu=20m
              - --memory=200Mi
              - --extra-memory=10Mi
              - --threshold=5
              - --deployment=metrics-server
              - --container=metrics-server
              - --poll-period=300000
              - --estimator=exponential
              - --minClusterSize=2
          volumes:
            - name: metrics-server-config-volume
              configMap:
                name: metrics-server-config
          tolerations:
            - key: "CriticalAddonsOnly"
              operator: "Exists"
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: metrics-server
      namespace: kube-system
      labels:
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/cluster-service: "true"
        kubernetes.io/name: "Metrics-server"
    spec:
      selector:
        k8s-app: metrics-server
      ports:
      - port: 443
        protocol: TCP
        targetPort: https
    ---
    apiVersion: apiregistration.k8s.io/v1beta1
    kind: APIService
    metadata:
      name: v1beta1.metrics.k8s.io
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      service:
        name: metrics-server
        namespace: kube-system
      group: metrics.k8s.io
      version: v1beta1
      insecureSkipTLSVerify: true
      groupPriorityMinimum: 100
      versionPriority: 100

    3.2) Deploy metrics-server and verify

    kubectl apply -f metrics.yaml
    
    # Check the pods
    [root@master01 kubernetes1.17.3]# kubectl get pods -n kube-system|grep metri
    kube-state-metrics-77bf9cdd7f-km4wf 1/1 Running 0 14d
    metrics-server-5cf9669fbf-g2c52 2/2 Running 0 2m44s
    [root@master01 kubernetes1.17.3]# kubectl top nodes
    NAME                     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
    master01.ziioffice.com   469m         11%    1997Mi          34%
    master02.ziioffice.com   374m         9%     1920Mi          32%
    master03.ziioffice.com   338m         8%     2099Mi          35%
    worker01.ziioffice.com   241m         6%     3203Mi          32%
    worker02.ziioffice.com   369m         9%     4499Mi          45%
    worker03.ziioffice.com   243m         6%     2828Mi          36%
    worker04.ziioffice.com   205m         5%     3328Mi          42%
    worker05.ziioffice.com   361m         4%     6586Mi          66%
    worker06.ziioffice.com   353m         4%     5907Mi          59%
    worker07.ziioffice.com   188m         4%     1535Mi          15%
    worker08.ziioffice.com   343m         8%     4134Mi          41%

    [root@master01 kubernetes1.17.3]# kubectl top pods -n dev
    NAME                                CPU(cores)   MEMORY(bytes)
    admin-server-869fb8cc6c-5pc59       8m           449Mi
    aoweb-7775c9c489-b4jcd              1m           56Mi
    aoweb-7775c9c489-lzssm              1m           39Mi
    cms-server-5f49b8b5cd-fjmk5         65m          687Mi
    cmsweb-6fb78765c-8p8r6              1m           56Mi
    config-server-5484bdcf65-f6mmh      21m          492Mi
    dsharepage-785889fb88-r5j6j         1m           38Mi
    dvr-78cd7f68bc-6dqf8                1m           60Mi
    es-server-ddf66f96f-r5vtb           2m           562Mi
    eureka-server-0                     17m          454Mi
    gateway-server-0                    35m          681Mi
    jobadmin-server-688dbc74cf-chfjd    21m          517Mi
    maxoffice-web-bj-f79fb996b-2hv2p    1m           163Mi
    maxoffice-web-sh-5dbb68b7-66bq2     1m           165Mi
    middle-server-7458bcd49c-4jm7c      67m          729Mi
    operation-server-6966b8c567-q6q9k   59m          764Mi
    txmanager-server-0                  21m          550Mi
  • Original article: https://www.cnblogs.com/louis2008/p/kuberneter_install_01.html