• Lesson 2: Kubernetes Cluster Deployment - Binary Installation


    4. Install keepalived on the master nodes

    #master01
    yum install -y keepalived
    cat >/etc/keepalived/keepalived.conf <<EOF
    global_defs {
       router_id KUB_LVS
    }
    
    vrrp_script CheckMaster {
        script "curl -k https://192.168.68.1:6443"
        interval 3
        timeout 9
        fall 2
        rise 2
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface ens160
        virtual_router_id 51
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 111111
        }
        virtual_ipaddress {
            192.168.68.1/24 dev ens160
        }
        track_script {
            CheckMaster
        }
    }
    EOF
    
    #master02/03
    yum install -y keepalived
    cat >/etc/keepalived/keepalived.conf <<EOF
    global_defs {
       router_id KUB_LVS
    }
    
    vrrp_script CheckMaster {
        script "curl -k https://192.168.68.1:6443"
        interval 3
        timeout 9
        fall 2
        rise 2
    }
    
    vrrp_instance VI_1 {
        state BACKUP
        interface ens160
        virtual_router_id 51
        priority 90
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 111111
        }
        virtual_ipaddress {
            192.168.68.1/24 dev ens160
        }
        track_script {
            CheckMaster
        }
    }
    EOF
    
    systemctl enable keepalived && systemctl restart keepalived
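
    A quick way to confirm the VIP is actually held by the MASTER node (a minimal check, assuming the interface name ens160 and VIP 192.168.68.1 used in the configuration above):

    #On master01 the VIP should appear as a secondary address on ens160
    ip addr show ens160 | grep 192.168.68.1
    #Failover test: stopping keepalived on master01 should move the VIP to the highest-priority BACKUP node
    #systemctl stop keepalived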
    

    5. Configure certificates

    5.1 Download the self-signed certificate tools

    Run these steps on the distribution host master01.
    The certificates can be generated with either openssl or cfssl.
    Here cfssl is used to generate the self-signed certificates.
    The cfssl.sh script downloads the cfssl tools.

    mkdir cfssl && cd cfssl
    cat >> cfssl.sh<<EOF
    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    chmod +x cfssl*
    mv cfssl_linux-amd64 /usr/bin/cfssl
    mv cfssljson_linux-amd64 /usr/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
    EOF
    chmod +x cfssl.sh
    sh cfssl.sh
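
    A quick sanity check that the tools landed on the PATH (an optional step, not part of the original script):

    cfssl version
    which cfssljson cfssl-certinfo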
    

    5.2 Generate the Etcd certificates

    Create the directory
    mkdir -p /root/etcd && cd /root/etcd

    Etcd certificate configuration

    #CA certificate configuration
    cat > ca-config.json <<EOF
    {
        "signing": {
          "default": {
            "expiry": "87600h"
          },
          "profiles": {
            "www": {
              "expiry": "89600h",
              "usages": [
                 "signing",
                 "key encipherment",
                 "server auth",
                 "client auth"
              ]
            }
          }  
        }
    }
    EOF
    
    #Create the CA certificate signing request (CSR) file
    cat > ca-csr.json <<EOF
    {
        "CN": "etcd CA",
        "key": {
              "algo": "rsa",
              "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing"
            }
        ]
    }
    EOF
    
    #Create the etcd certificate signing request file; all master IPs can be added to the hosts list
    cat > service-csr.json <<EOF
    {
        "CN": "etcd",
        "hosts": [
        "master01",
        "master02",
        "master03",
        "192.168.68.146",
        "192.168.68.147",
        "192.168.68.148"
        ],
        "key": {
           "algo": "rsa",
           "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
          }
        ]
    }
    EOF
    
    #Generate the CA certificate
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    #Generate the etcd certificate
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www service-csr.json | cfssljson -bare server
    
    [root@master01 etcd]# ll
    total 36
    -rw-r--r-- 1 root root  315 Aug  8 01:25 ca-config.json
    -rw-r--r-- 1 root root  956 Aug  8 01:31 ca.csr
    -rw-r--r-- 1 root root  213 Aug  8 01:26 ca-csr.json
    -rw------- 1 root root 1679 Aug  8 01:31 ca-key.pem
    -rw-r--r-- 1 root root 1265 Aug  8 01:31 ca.pem
    -rw-r--r-- 1 root root 1054 Aug  8 01:40 server.csr
    -rw------- 1 root root 1675 Aug  8 01:40 server-key.pem #used by etcd clients
    -rw-r--r-- 1 root root 1379 Aug  8 01:40 server.pem
    -rw-r--r-- 1 root root  323 Aug  8 01:29 service-csr.json
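
    The SANs baked into the issued certificate can be inspected with cfssl-certinfo (an optional check; the output should list the master hostnames and IPs from service-csr.json):

    cfssl-certinfo -cert server.pem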
    
    

    Kubernetes certificate configuration

    mkdir -p /root/kubernetes && cd /root/kubernetes
    #CA certificate configuration
    cat > ca-config.json <<EOF
    {
        "signing": {
          "default": {
            "expiry": "87600h"
          },
          "profiles": {
            "kubernetes": {
              "expiry": "89600h",
              "usages": [
                 "signing",
                 "key encipherment",
                 "server auth",
                 "client auth"
              ]
            }
          }  
        }
    }
    EOF
    
    #Create the CA certificate signing request file
    cat > ca-csr.json <<EOF
    {
        "CN": "kubernetes",
        "key": {
              "algo": "rsa",
              "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    
    #Create the api-server certificate signing request file; all master IPs can be added to the hosts list
    cat > service-csr.json <<EOF
    {
        "CN": "kubernetes",
        "hosts": [
        "master01",
        "master02",
        "master03",
        "node01",
        "node02",
        "192.168.68.146",
        "192.168.68.147",
        "192.168.68.148",
        "192.168.68.149",
        "192.168.68.151",
        "192.168.68.1",
        "10.0.0.1",
        "10.0.0.2",
        "127.0.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
        ],
        "key": {
           "algo": "rsa",
           "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
          }
        ]
    }
    EOF
    
    #Create the kube-proxy certificate signing request
    cat > kube-proxy-csr.json <<EOF
    {
        "CN": "system:kube-proxy",
        "hosts": [],
        "key": {
              "algo": "rsa",
              "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    
    #Generate the CA certificate
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    #Generate the api-server certificate
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-csr.json | cfssljson -bare server
    #Generate the kube-proxy certificate
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    
    [root@master01 kubernetes]# ll
    total 52
    -rw-r--r-- 1 root root  322 Aug  8 01:43 ca-config.json
    -rw-r--r-- 1 root root 1001 Aug  8 01:55 ca.csr
    -rw-r--r-- 1 root root  268 Aug  8 01:53 ca-csr.json
    -rw------- 1 root root 1675 Aug  8 01:55 ca-key.pem
    -rw-r--r-- 1 root root 1359 Aug  8 01:55 ca.pem
    -rw-r--r-- 1 root root 1009 Aug  8 01:57 kube-proxy.csr
    -rw-r--r-- 1 root root  292 Aug  8 01:54 kube-proxy-csr.json
    -rw------- 1 root root 1675 Aug  8 01:57 kube-proxy-key.pem
    -rw-r--r-- 1 root root 1403 Aug  8 01:57 kube-proxy.pem
    -rw-r--r-- 1 root root 1358 Aug  8 01:56 server.csr
    -rw------- 1 root root 1675 Aug  8 01:56 server-key.pem
    -rw-r--r-- 1 root root 1724 Aug  8 01:56 server.pem
    -rw-r--r-- 1 root root  670 Aug  8 01:51 service-csr.json
    

    6. Install Etcd

    Download the etcd binaries

    mkdir /root/soft && cd /root/soft
    wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
    tar zxvf etcd-v3.3.10-linux-amd64.tar.gz
    cd etcd-v3.3.10-linux-amd64
    cp etcd etcdctl /usr/local/bin/
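
    If the download was only done on master01, the etcd and etcdctl binaries also need to reach master02 and master03. A sketch using the same scp loop style as the rest of this guide (assuming passwordless SSH between the masters):

    etcd --version
    for i in master02 master03;do scp /usr/local/bin/etcd /usr/local/bin/etcdctl $i:/usr/local/bin/;done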
    

    6.1 Create the etcd configuration files

    #master01
    mkdir -p /etc/etcd/{cfg,ssl}
    cat >/etc/etcd/cfg/etcd.conf<<EOF
    #{Member}
    ETCD_NAME="master01"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.68.146:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.68.146:2379,http://192.168.68.146:2390"
    
    #{Clustering}
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.68.146:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.68.146:2379"
    ETCD_INITIAL_CLUSTER="master01=https://192.168.68.146:2380,master02=https://192.168.68.147:2380,master03=https://192.168.68.148:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    EOF
    
    #master02
    mkdir -p /etc/etcd/{cfg,ssl}
    cat >/etc/etcd/cfg/etcd.conf<<EOF
    #{Member}
    ETCD_NAME="master02"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.68.147:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.68.147:2379,http://192.168.68.147:2390"
    
    #{Clustering}
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.68.147:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.68.147:2379"
    ETCD_INITIAL_CLUSTER="master01=https://192.168.68.146:2380,master02=https://192.168.68.147:2380,master03=https://192.168.68.148:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    EOF
    
    #master03
    mkdir -p /etc/etcd/{cfg,ssl}
    cat >/etc/etcd/cfg/etcd.conf<<EOF
    #{Member}
    ETCD_NAME="master03"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.68.148:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.68.148:2379,http://192.168.68.148:2390"
    
    #{Clustering}
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.68.148:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.68.148:2379"
    ETCD_INITIAL_CLUSTER="master01=https://192.168.68.146:2380,master02=https://192.168.68.147:2380,master03=https://192.168.68.148:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    EOF
    
    

    Parameter description:

    Field                              Description
    ETCD_NAME                          Node name; with multiple nodes, each node must be set to its own name.
    ETCD_DATA_DIR                      Data directory.
    ETCD_LISTEN_PEER_URLS              Listen address for cluster (peer) communication.
    ETCD_LISTEN_CLIENT_URLS            Listen address for client access.
    ETCD_INITIAL_ADVERTISE_PEER_URLS   Advertised peer address for the cluster.
    ETCD_ADVERTISE_CLIENT_URLS         Advertised client address.
    ETCD_INITIAL_CLUSTER               Cluster node addresses, comma-separated when there are multiple nodes.
    ETCD_INITIAL_CLUSTER_TOKEN         Cluster token.
    ETCD_INITIAL_CLUSTER_STATE         State when joining the cluster: "new" for a new cluster, "existing" to join an existing cluster.

    6.2 Create the etcd systemd service

    Create the etcd systemd unit file on each of master01/02/03.

    cat >/usr/lib/systemd/system/etcd.service<<'EOF'
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    EnvironmentFile=/etc/etcd/cfg/etcd.conf
    ExecStart=/usr/local/bin/etcd \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster-state=new \
    --cert-file=/etc/etcd/ssl/server.pem \
    --key-file=/etc/etcd/ssl/server-key.pem \
    --peer-cert-file=/etc/etcd/ssl/server.pem \
    --peer-key-file=/etc/etcd/ssl/server-key.pem \
    --trusted-ca-file=/etc/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
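
    After creating or changing unit files, reload systemd so the new unit is picked up (an easy step to forget):

    systemctl daemon-reload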
    

    6.3 Copy the etcd certificates to the designated directory

    This directory must match the certificate paths used in the etcd unit file above.
    With multiple master nodes, the certificates must be copied to every master.

    mkdir -p /etc/etcd/ssl
    cp /root/etcd/*pem /etc/etcd/ssl/ -rf
    #Copy the etcd certificates to every etcd node (here the 3 master nodes)
    for i in master02 master03;do ssh $i mkdir -p /etc/etcd/{cfg,ssl};done
    for i in master02 master03;do scp /etc/etcd/ssl/* $i:/etc/etcd/ssl/;done
    

    6.4 Start etcd

    systemctl enable etcd
    systemctl start etcd
    systemctl status etcd
    

    6.5 Check that the etcd cluster is healthy

    [root@master01 system]# etcdctl \
    --ca-file=/etc/etcd/ssl/ca.pem \
    --cert-file=/etc/etcd/ssl/server.pem \
    --key-file=/etc/etcd/ssl/server-key.pem \
    --endpoint="https://192.168.68.146:2379" \
    cluster-health
    member 518905a4e1408b4a is healthy: got healthy result from https://192.168.68.148:2379
    member 9affe5eacb47bb95 is healthy: got healthy result from https://192.168.68.147:2379
    member d040d1696a38da95 is healthy: got healthy result from https://192.168.68.146:2379
    cluster is healthy
    

    6.6 Create the pod network segment needed by docker

    Write the cluster pod network segment into etcd.
    172.17.0.0/16 is the IP address range for Kubernetes pods.
    This segment must match the --cluster-cidr parameter of kube-controller-manager.

     etcdctl --endpoint="https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379" \
     --ca-file=/etc/etcd/ssl/ca.pem \
     --cert-file=/etc/etcd/ssl/server.pem \
     --key-file=/etc/etcd/ssl/server-key.pem \
     set /coreos.com/network/config \
     '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'
    

    Check that the network segment has been written

    etcdctl --endpoint="https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379" \
     --ca-file=/etc/etcd/ssl/ca.pem \
     --cert-file=/etc/etcd/ssl/server.pem \
     --key-file=/etc/etcd/ssl/server-key.pem \
     get /coreos.com/network/config
     {"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}
    

    7. Install docker

    Install docker on all node machines.
    Docker is deployed by running the system initialization script from the earlier steps.
    Note: the docker unit file should be configured as follows:

    [root@node02 ~]# more /usr/lib/systemd/system/docker.service 
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    BindsTo=containerd.service
    After=network-online.target firewalld.service containerd.service
    Wants=network-online.target
    
    [Service]
    Type=notify
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd --data-root=/data/docker $DOCKER_NETWORK_OPTIONS
    ExecReload=/bin/kill -s HUP $MAINPID
    TimeoutSec=0
    RestartSec=2
    Restart=on-failure
    StartLimitBurst=3
    StartLimitInterval=60s
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    TasksMax=infinity
    Delegate=yes
    KillMode=process
    [Install]
    WantedBy=multi-user.target
    

    8. Install flannel

    8.1 Download the flannel binary package

    On all nodes

    mkdir soft && cd soft
    #Download link: https://pan.baidu.com/s/1M-3tgKkA0Pl0qMtlyT3G8Q  extraction code: drtr
    tar zxvf flannel-v0.10.0-linux-amd64.tar.gz
    mv flanneld mk-docker-opts.sh /usr/local/bin/
    #Copy to the other nodes
    for i in master02 master03 node01 node02;do scp /usr/local/bin/flanneld $i:/usr/local/bin/;done
    for i in master02 master03 node01 node02;do scp /usr/local/bin/mk-docker-opts.sh $i:/usr/local/bin/;done
    

    8.2 Configure flannel

    mkdir -p /etc/flannel
    cat >/etc/flannel/flannel.cfg<<EOF
    FLANNEL_OPTIONS="-etcd-endpoints=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379  -etcd-cafile=/etc/etcd/ssl/ca.pem  -etcd-certfile=/etc/etcd/ssl/server.pem -etcd-keyfile=/etc/etcd/ssl/server-key.pem"
    EOF
    

    8.3 Create the flanneld systemd unit file

    cat >/usr/lib/systemd/system/flanneld.service<<'EOF'
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service
    
    [Service]
    Type=notify
    EnvironmentFile=/etc/flannel/flannel.cfg
    ExecStart=/usr/local/bin/flanneld --ip-masq $FLANNEL_OPTIONS
    ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    for i in master02 master03 node01 node02;do scp /usr/lib/systemd/system/flanneld.service $i:/usr/lib/systemd/system/;done
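
    The flannel configuration file from 8.2 also has to exist on every node. If it was only created on master01, a sketch of distributing it in the same style (assumption: /etc/flannel/flannel.cfg is identical on all nodes):

    for i in master02 master03 node01 node02;do ssh $i mkdir -p /etc/flannel;done
    for i in master02 master03 node01 node02;do scp /etc/flannel/flannel.cfg $i:/etc/flannel/;done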
    

    Notes on the startup configuration
    The mk-docker-opts.sh script writes the pod subnet assigned to flannel into /run/flannel/subnet.env; when docker starts later, it uses the environment variables in this file to configure the docker0 bridge.
    flanneld communicates with other nodes over the interface that carries the system default route; on nodes with multiple interfaces (e.g. public and private networks), the -iface parameter can be used to specify the interface, such as eth0 or ens160.
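
    For reference, the generated file usually looks roughly like the following (illustrative values only; the actual subnet depends on what flannel allocates on each node):

    #/run/flannel/subnet.env (example content)
    DOCKER_NETWORK_OPTIONS=" --bip=172.17.49.1/24 --ip-masq=false --mtu=1450"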

    8.4 Start flanneld and check its status

    systemctl enable flanneld
    systemctl start flanneld
    

    Every node should now have an IP from the 172.17.0.0/16 range on its flannel.1 interface

    4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
        link/ether 26:c8:e8:41:78:d4 brd ff:ff:ff:ff:ff:ff
        inet 172.17.39.0/32 scope global flannel.1
           valid_lft forever preferred_lft forever
        inet6 fe80::24c8:e8ff:fe41:78d4/64 scope link 
           valid_lft forever preferred_lft forever
    

    Stop flanneld on the node machines before modifying the docker unit file
    systemctl stop flanneld

    8.5 Modify the docker unit file on node01 and node02

    cat >/usr/lib/systemd/system/docker.service<<'EOF'
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    BindsTo=containerd.service
    After=network-online.target firewalld.service containerd.service
    Wants=network-online.target
    
    [Service]
    Type=notify
    #docker reads the subnet file generated by flannel
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd --data-root=/data/docker $DOCKER_NETWORK_OPTIONS
    ExecReload=/bin/kill -s HUP $MAINPID
    TimeoutSec=0
    RestartSec=2
    Restart=on-failure
    StartLimitBurst=3
    StartLimitInterval=60s
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    TasksMax=infinity
    Delegate=yes
    KillMode=process
    [Install]
    WantedBy=multi-user.target
    EOF
    

    8.6 Restart docker

    systemctl daemon-reload
    systemctl restart docker
    

    Check that docker0 and flannel.1 now have IPs in the same subnet

    3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
        link/ether 02:42:22:f5:c8:4a brd ff:ff:ff:ff:ff:ff
        inet 172.17.49.1/24 brd 172.17.49.255 scope global docker0
           valid_lft forever preferred_lft forever
    4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default 
        link/ether e2:18:c3:93:cb:92 brd ff:ff:ff:ff:ff:ff
        inet 172.17.49.0/32 scope global flannel.1
           valid_lft forever preferred_lft forever
        inet6 fe80::e018:c3ff:fe93:cb92/64 scope link 
           valid_lft forever preferred_lft forever
    

    8.7 Verify from each node that the docker0 of the other nodes is reachable

    Ping the other nodes from each node; all the subnets should be reachable.

    #node02 docker0 IP is 172.17.49.1; pinging it from node01 succeeds
    [root@node01 soft]# ping 172.17.49.1
    PING 172.17.49.1 (172.17.49.1) 56(84) bytes of data.
    64 bytes from 172.17.49.1: icmp_seq=1 ttl=64 time=0.299 ms
    64 bytes from 172.17.49.1: icmp_seq=2 ttl=64 time=0.234 ms
    ^C
    --- 172.17.49.1 ping statistics ---
    2 packets transmitted, 2 received, 0% packet loss, time 999ms
    rtt min/avg/max/mdev = 0.234/0.266/0.299/0.036 ms
    

    9. Install the master components

    The following components need to be installed on the master side:
    kube-apiserver
    kube-scheduler
    kube-controller-manager

    9.1 Install the API Server service (all master nodes)

    9.1.1 Download the kubernetes binary package
    cd /root/soft
    tar zxvf kubernetes-server-linux-amd64.tar.gz
    cd kubernetes/server/bin
    cp kube-scheduler kube-apiserver kube-controller-manager kubectl /usr/local/bin/
    #Copy the binaries to the other master nodes
    for i in master02 master03;do scp /usr/local/bin/kube* $i:/usr/local/bin/;done
    
    9.1.2 Configure the kubernetes certificates

    The kubernetes components need certificates to communicate with each other; they must be copied to every master node.

    mkdir -p /etc/kubernetes/{cfg,ssl}
    cp /root/kubernetes/*.pem /etc/kubernetes/ssl/
    
    #Copy to all nodes
    for i in master02 master03 node01 node02;do ssh $i mkdir -p /etc/kubernetes/{cfg,ssl};done
    for i in master02 master03 node01 node02;do scp /root/kubernetes/*.pem $i:/etc/kubernetes/ssl/;done
    
    9.1.3 Create the TLS Bootstrapping Token

    TLS Bootstrapping lets the kubelet first connect to the apiserver as a predefined low-privilege user and then request a certificate from the apiserver; the kubelet certificate is signed dynamically by the apiserver.
    The token can be any string containing 128 bits of entropy and can be generated with a secure random number generator.

    [root@master01 ~]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
    a37e9d743248a4589728d60cd35c159c
    
    9.1.4 Create the token file

    Run on master01.
    a37e9d743248a4589728d60cd35c159c: the random string generated above.
    kubelet-bootstrap: user name
    10001: UID
    system:kubelet-bootstrap: user group

    cat >/etc/kubernetes/cfg/token.csv<<EOF
    a37e9d743248a4589728d60cd35c159c,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
    #Copy the token file to master02 and master03.
    for i in master02 master03;do scp /etc/kubernetes/cfg/token.csv $i:/etc/kubernetes/cfg;done
    
    9.1.5 Create the apiserver configuration file

    The configuration file is essentially the same on every master; with multiple master nodes only the IP addresses need to change.
    master01

    cat >/etc/kubernetes/cfg/kube-apiserver.cfg<<'EOF'
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --insecure-bind-address=0.0.0.0 \
    --etcd-servers=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379 \
    --bind-address=0.0.0.0 \
    --secure-port=6443 \
    --advertise-address=0.0.0.0 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/etc/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/etc/kubernetes/ssl/server.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/etc/etcd/ssl/ca.pem \
    --etcd-certfile=/etc/etcd/ssl/server.pem \
    --etcd-keyfile=/etc/etcd/ssl/server-key.pem"
    EOF
    

    On master02 and master03 the configuration is the same; bind-address and advertise-address are likewise set to 0.0.0.0 rather than the node IPs.
    master02

    cat >/etc/kubernetes/cfg/kube-apiserver.cfg<<'EOF'
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --insecure-bind-address=0.0.0.0 \
    --etcd-servers=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379 \
    --bind-address=0.0.0.0 \
    --secure-port=6443 \
    --advertise-address=0.0.0.0 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/etc/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/etc/kubernetes/ssl/server.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/etc/etcd/ssl/ca.pem \
    --etcd-certfile=/etc/etcd/ssl/server.pem \
    --etcd-keyfile=/etc/etcd/ssl/server-key.pem"
    EOF
    

    master03

    cat >/etc/kubernetes/cfg/kube-apiserver.cfg<<'EOF'
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --insecure-bind-address=0.0.0.0 \
    --etcd-servers=https://192.168.68.146:2379,https://192.168.68.147:2379,https://192.168.68.148:2379 \
    --bind-address=0.0.0.0 \
    --secure-port=6443 \
    --advertise-address=0.0.0.0 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/etc/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/etc/kubernetes/ssl/server.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/etc/etcd/ssl/ca.pem \
    --etcd-certfile=/etc/etcd/ssl/server.pem \
    --etcd-keyfile=/etc/etcd/ssl/server-key.pem"
    EOF
    
    9.1.6 Create the kube-apiserver systemd unit file
    cat >/usr/lib/systemd/system/kube-apiserver.service<<'EOF'
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.cfg
    ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    9.1.7 Start the kube-apiserver service
    systemctl start kube-apiserver
    systemctl status kube-apiserver
    systemctl enable kube-apiserver
    

    Check whether the TLS port 6443 is listening

    [root@master01 ~]# netstat -lntup | grep 6443   
    tcp        0      0 192.168.68.146:6443     0.0.0.0:*               LISTEN      32470/kube-apiserve 
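
    kube-scheduler and kube-controller-manager below talk to the apiserver over the local insecure port (--master=127.0.0.1:8080, the default insecure port in this release), so it is worth confirming that it answers too (an optional check):

    curl http://127.0.0.1:8080/healthz
    #expected output: ok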
    

    9.2 Deploy the kube-scheduler service

    Create the kube-scheduler configuration file (all master nodes)

    cat >/etc/kubernetes/cfg/kube-scheduler.cfg<<EOF
    KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --bind-address=0.0.0.0 --master=127.0.0.1:8080 --leader-elect"
    EOF
    

    Parameter description:
    --bind-address=0.0.0.0   address the service binds to at startup
    --master   connect to the local apiserver (insecure port)
    --leader-elect=true   cluster mode: enables leader election; the node elected leader handles the work while the other nodes stand by.

    9.2.1 Create the kube-scheduler systemd unit file

    Create the kube-scheduler systemd unit file

    cat >/usr/lib/systemd/system/kube-scheduler.service<<'EOF'
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.cfg
    ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    9.2.2 Start the kube-scheduler service
    systemctl start kube-scheduler
    systemctl status kube-scheduler
    systemctl enable kube-scheduler
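
    kube-scheduler also exposes a plain health endpoint on its default insecure port 10251; an optional check before moving on:

    curl http://127.0.0.1:10251/healthz
    #expected output: ok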
    
    9.2.3 Check the status of the master components (controller-manager still shows Unhealthy here because it has not been deployed yet; that is done in the next step)
    [root@master01 ~]# kubectl get cs
    NAME                 STATUS      MESSAGE                                                                                     ERROR
    controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
    scheduler            Healthy     ok                                                                                          
    etcd-0               Healthy     {"health":"true"}                                                                           
    etcd-2               Healthy     {"health":"true"}                                                                           
    etcd-1               Healthy     {"health":"true"} 
    

    9.3 Deploy kube-controller-manager

    9.3.1 Create the kube-controller-manager configuration file
    cat >/etc/kubernetes/cfg/kube-controller-manager.cfg<<'EOF'
    KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
    --v=4 \
    --master=127.0.0.1:8080 \
    --leader-elect=true \
    --address=0.0.0.0 \
    --service-cluster-ip-range=10.0.0.0/24 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --root-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
    EOF
    

    Parameter description:
    --master=127.0.0.1:8080   address of the local apiserver
    --leader-elect   leader election picks one leader node; the other nodes stand by
    --service-cluster-ip-range   IP address range for kubernetes services.

    9.3.2 Create the kube-controller-manager systemd unit file
    cat >/usr/lib/systemd/system/kube-controller-manager.service<<'EOF'
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.cfg
    ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    9.3.3 Start the kube-controller-manager service
    systemctl start kube-controller-manager
    systemctl status kube-controller-manager
    systemctl enable kube-controller-manager
    
    [root@master01 ~]# systemctl status kube-controller-manager
    ● kube-controller-manager.service - Kubernetes Controller Manager
       Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; disabled; vendor preset: disabled)
       Active: active (running) since Mon 2020-08-10 10:31:05 CST; 50s ago
         Docs: https://github.com/kubernetes/kubernetes
     Main PID: 8635 (kube-controller)
       CGroup: /system.slice/kube-controller-manager.service
               └─8635 /usr/local/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=0.0.0.0 --service-cluster-ip-range=10.0.0.0/24 --c...
    
    
    9.3.4 Check the status of the master components
    [root@master01 ~]# kubectl get cs
    NAME                 STATUS    MESSAGE             ERROR
    scheduler            Healthy   ok                  
    controller-manager   Healthy   ok                  
    etcd-0               Healthy   {"health":"true"}   
    etcd-2               Healthy   {"health":"true"}   
    etcd-1               Healthy   {"health":"true"}   
    

    10. Node components

    Components that need to be deployed on the nodes:
    kubelet
    kube-proxy
    flannel
    docker

    10.1 Deploy the kubelet component

    kubelet runs on every node; it receives requests from kube-apiserver, manages pod containers, and executes interactive commands such as exec, run, and log.
    On startup, kubelet automatically registers the node with kube-apiserver, and the built-in cAdvisor collects and monitors the node's resource usage.

    10.1.1 Copy the kubernetes binaries from the master to the nodes

    Copy the kubelet and kube-proxy binaries from master01 to node01 and node02.

    cd /root/soft
    scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy node01:/usr/local/bin/
    scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy node02:/usr/local/bin/
    
    10.1.2 Create the kubelet bootstrap.kubeconfig file

    In kubernetes, kubeconfig files are used to access cluster information. In a TLS-enabled cluster every interaction with the cluster requires authentication; production environments usually authenticate with certificates, and the information needed for that authentication is stored in the kubeconfig file.

    On master01

    mkdir /root/config && cd /root/config
    cat >environment.sh<<'EOF'
    #Create the kubelet bootstrapping kubeconfig
    BOOTSTRAP_TOKEN=a37e9d743248a4589728d60cd35c159c #the token created earlier
    KUBE_APISERVER="https://192.168.68.1:6443" #the VIP address
    #Set the cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=bootstrap.kubeconfig
    #Set the client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig
    #Set the context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig
    #Use the default context
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    #Running bash environment.sh produces the bootstrap.kubeconfig file
    EOF
    

    Run the script

    sh environment.sh
    [root@master01 config]# sh environment.sh 
    Cluster "kubernetes" set.
    User "kubelet-bootstrap" set.
    Context "default" created.
    Switched to context "default".
    
    10.1.3 Create the kube-proxy kubeconfig file
    cat >env_proxy.sh<<'EOF'
    #Create the kube-proxy kubeconfig file
    BOOTSTRAP_TOKEN=a37e9d743248a4589728d60cd35c159c
    KUBE_APISERVER="https://192.168.68.1:6443"
    
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-credentials kube-proxy \
      --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
      --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
      
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
      
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    EOF
    

    Run the script

    [root@master01 config]# sh env_proxy.sh  
    Cluster "kubernetes" set.
    User "kube-proxy" set.
    Context "default" created.
    Switched to context "default".
    
    10.1.4 Copy the kubeconfig files and certificates to all node machines

    Copy the bootstrap.kubeconfig and kube-proxy.kubeconfig files to all node machines.

    ssh node01 "mkdir -p /etc/kubernetes/{cfg,ssl}"
    ssh node02 "mkdir -p /etc/kubernetes/{cfg,ssl}"
    

    Copy the certificate files

    scp /etc/kubernetes/ssl/* node01:/etc/kubernetes/ssl/
    scp /etc/kubernetes/ssl/* node02:/etc/kubernetes/ssl/
    

    Copy the kubeconfig files

    cd /root/config
    scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node01:/etc/kubernetes/cfg/
    scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node02:/etc/kubernetes/cfg/
    
    [root@master01 config]# scp /etc/kubernetes/ssl/* node01:/etc/kubernetes/ssl/
    ca-key.pem                                                                                                                                                 100% 1675     3.5MB/s   00:00    
    ca.pem                                                                                                                                                     100% 1359     3.0MB/s   00:00    
    kube-proxy-key.pem                                                                                                                                         100% 1675     4.2MB/s   00:00    
    kube-proxy.pem                                                                                                                                             100% 1403     3.9MB/s   00:00    
    server-key.pem                                                                                                                                             100% 1675     4.2MB/s   00:00    
    server.pem                                                                                                                                                 100% 1724     4.4MB/s   00:00    
    [root@master01 config]# scp /etc/kubernetes/ssl/* node02:/etc/kubernetes/ssl/
    ca-key.pem                                                                                                                                                 100% 1675     2.7MB/s   00:00    
    ca.pem                                                                                                                                                     100% 1359     2.9MB/s   00:00    
    kube-proxy-key.pem                                                                                                                                         100% 1675     4.0MB/s   00:00    
    kube-proxy.pem                                                                                                                                             100% 1403     3.0MB/s   00:00    
    server-key.pem                                                                                                                                             100% 1675     4.4MB/s   00:00    
    server.pem                                                                                                                                                 100% 1724     4.0MB/s   00:00    
    [root@master01 config]# cd /root/config/
    [root@master01 config]# scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node01:/etc/kubernetes/cfg/
    bootstrap.kubeconfig                                                                                                                                       100% 2166     1.6MB/s   00:00    
    kube-proxy.kubeconfig                                                                                                                                      100% 6268     4.8MB/s   00:00    
    [root@master01 config]# scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig node02:/etc/kubernetes/cfg/
    bootstrap.kubeconfig                                                                                                                                       100% 2166     1.6MB/s   00:00    
    kube-proxy.kubeconfig                                                                                                                                      100% 6268     5.2MB/s   00:00    
    [root@master01 config]# 
    
    10.1.5 Create the kubelet parameter file

    The IP address differs per node and must be adjusted (run on the node machines).
    node01

    cat >/etc/kubernetes/cfg/kubelet.config<<EOF
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 192.168.68.149
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    EOF
    

    node02

    cat >/etc/kubernetes/cfg/kubelet.config<<EOF
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 192.168.68.151
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    EOF
    
    10.1.6 Create the kubelet configuration file

    The IP address differs per node and must be adjusted.
    The /etc/kubernetes/cfg/kubelet.kubeconfig file is generated automatically.

    #node01
    cat >/etc/kubernetes/cfg/kubelet<<'EOF'
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=192.168.68.149 \
    --kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/etc/kubernetes/cfg/kubelet.config \
    --cert-dir=/etc/kubernetes/ssl \
    --pod-infra-container-image=docker.io/kubernetes/pause:latest"
    EOF
    
    #node02
    cat >/etc/kubernetes/cfg/kubelet<<'EOF'
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=192.168.68.151 \
    --kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/etc/kubernetes/cfg/kubelet.config \
    --cert-dir=/etc/kubernetes/ssl \
    --pod-infra-container-image=docker.io/kubernetes/pause:latest"
    EOF
    
    10.1.7 Create the kubelet systemd unit file
    cat >/usr/lib/systemd/system/kubelet.service<<'EOF'
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kubelet
    ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    10.1.8 Bind the kubelet-bootstrap user to the system cluster role

    Run on master01

    kubectl create clusterrolebinding kubelet-bootstrap \
      --clusterrole=system:node-bootstrapper \
      --user=kubelet-bootstrap
    
    10.1.9 Start the kubelet service (node machines)
    systemctl start kubelet
    systemctl status kubelet
    systemctl enable kubelet
    

    10.2 View and approve CSR requests on the server side

    View the CSR requests
    Run on master01

    [root@master01 config]# kubectl get csr
    NAME                                                   AGE     REQUESTOR           CONDITION
    node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ   5m26s   kubelet-bootstrap   Pending
    node-csr-k6HGdR3UQ0cpvFKot2it_YsUN8uHWlsFq0fFiA5bnzU   12m     kubelet-bootstrap   Pending
    
    10.2.1 Approve the requests

    Run on master01

    kubectl certificate approve node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ
    kubectl certificate approve node-csr-k6HGdR3UQ0cpvFKot2it_YsUN8uHWlsFq0fFiA5bnzU
    
    [root@master01 config]# kubectl get csr
    NAME                                                   AGE     REQUESTOR           CONDITION
    node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ   8m46s   kubelet-bootstrap   Approved,Issued
    node-csr-k6HGdR3UQ0cpvFKot2it_YsUN8uHWlsFq0fFiA5bnzU   16m     kubelet-bootstrap   Approved,Issued
    

    10.3 Handling duplicate node names

    If the --hostname-override=192.168.68.151 field in /etc/kubernetes/cfg/kubelet is not changed when setting up a new node, the node name will clash with an existing one; in that case delete the certificate request first and then apply again.
    Run on the master node

    kubectl  delete csr node-csr-EIYu6J_7noPLUQc28Z3kEUQPlD0SdVOdexxFQqclQyQ
    

    On the node, delete kubelet.kubeconfig.
    Then restart the kubelet service on the node to request a new certificate.

    rm -rf /etc/kubernetes/cfg/kubelet.kubeconfig
    

    10.4 Check the node status

    All node machines must be in the Ready state.

    [root@master01 config]# kubectl get nodes
    NAME             STATUS   ROLES    AGE     VERSION
    192.168.68.149   Ready    <none>   6m24s   v1.15.1
    192.168.68.151   Ready    <none>   6m38s   v1.15.1
    

    10.5 Deploy the kube-proxy component

    kube-proxy runs on the node machines; it watches for changes to services and endpoints in the apiserver and creates routing rules to load-balance services.

    10.5.1 Create the kube-proxy configuration file

    Note: the hostname-override address must be modified; each node uses its own IP.
    node01

    cat >/etc/kubernetes/cfg/kube-proxy<<'EOF'
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --metrics-bind-address=0.0.0.0 \
    --hostname-override=192.168.68.149 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
    EOF
    

    node02

    cat >/etc/kubernetes/cfg/kube-proxy<<'EOF'
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --metrics-bind-address=0.0.0.0 \
    --hostname-override=192.168.68.151 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
    EOF
    
    10.5.2 Create the kube-proxy systemd unit file
    cat >/usr/lib/systemd/system/kube-proxy.service<<'EOF'
    [Unit]
    Description=Kubernetes Proxy
    After=network.target
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-proxy
    ExecStart=/usr/local/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    10.5.3 Start the service
    systemctl start kube-proxy
    systemctl status kube-proxy
    systemctl enable kube-proxy
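
    An optional check that kube-proxy is up and programming service rules (kube-proxy answers health checks on port 10256 by default, and this setup uses its default iptables mode, so KUBE-SERVICES chains should appear):

    curl http://127.0.0.1:10256/healthz
    iptables-save | grep -c KUBE-SERVICES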
    

    11. Run a demo project

    kubectl run nginx --image=nginx --replicas=2
    [root@master01 config]# kubectl run nginx --image=nginx --replicas=2
    kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
    
    kubectl expose deployment nginx --port 88 --target-port=80 --type=NodePort
    

    11.1 View the pods

    kubectl get pods
    [root@master01 config]# kubectl get pods
    NAME                     READY   STATUS              RESTARTS   AGE
    nginx-7bb7cd8db5-577pp   0/1     ContainerCreating   0          27s
    nginx-7bb7cd8db5-lqpzd   0/1     ContainerCreating   0          27s
    
    [root@master01 config]# kubectl get pods
    NAME                     READY   STATUS    RESTARTS   AGE
    nginx-7bb7cd8db5-577pp   1/1     Running   0          108s
    nginx-7bb7cd8db5-lqpzd   1/1     Running   0          108s
    

    11.2 View the services

    [root@master01 config]# kubectl get svc
    NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
    kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP        6h58m
    nginx        NodePort    10.0.0.61    <none>        88:42780/TCP   39s
    

    11.3 Access the web page

    [root@master01 config]# curl http://192.168.68.149:42780
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    <style>
        body {
            width: 35em;
            margin: 0 auto;
            font-family: Tahoma, Verdana, Arial, sans-serif;
        }
    </style>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and
    working. Further configuration is required.</p>
    
    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>
    
    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>
    

    11.4 Delete the project

    kubectl delete deployment nginx
    kubectl delete pods nginx
    kubectl delete svc -l run=nginx
    kubectl delete deployment.apps/nginx
    
    [root@master01 config]# kubectl delete deployment nginx
    deployment.extensions "nginx" deleted
    [root@master01 config]# kubectl delete pods nginx
    Error from server (NotFound): pods "nginx" not found
    [root@master01 config]# kubectl delete svc -l run=nginx
    service "nginx" deleted
    [root@master01 config]# kubectl delete deployment.apps/nginx
    Error from server (NotFound): deployments.apps "nginx" not found
    

    11.5 Service start order

    11.5.1 Start the master nodes
    systemctl start keepalived
    systemctl start etcd
    systemctl start kube-apiserver
    systemctl start kube-scheduler
    systemctl start kube-controller-manager
    systemctl start flanneld
    #Check the k8s cluster status
    kubectl get cs
    kubectl get nodes
    kubectl get pods -A
    
    11.5.2 Start the node machines
    systemctl start flanneld
    systemctl start docker
    systemctl start kubelet
    systemctl start kube-proxy
    
    11.5.3 Stop the node machines
    systemctl stop kube-proxy
    systemctl stop kubelet
    systemctl stop docker
    systemctl stop flanneld
    