• 1.18 kubeconfig ./admin.conf


    1系统初始化
    1.1 初始化工具安装
    #所有节点
    [root@master-1 ~]# yum install net-tools vim wget lrzsz git -y

    1.2 关闭防火墙与Selinux
    #所有节点
    [root@master-1 ~]# systemctl stop firewalld
    [root@master-1 ~]# systemctl disable firewalld
    [root@master-1 ~]# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    [root@master-1 ~]# reboot

    1.3设置时区
    #所有节点
    [root@master-1 ~]# \cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime -rf

    1.4关闭交换分区
    #所有节点
    [root@master-1 ~]# swapoff -a
    [root@master-1 ~]# sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

    1.5设置系统时间同步
    #所有节点
    [root@master-1 ~]# yum install -y ntpdate
    [root@master-1 ~]# ntpdate -u ntp.api.bz
    [root@master-1 ~]# echo "*/5 * * * * ntpdate time7.aliyun.com >/dev/null 2>&1" >> /etc/crontab
    [root@master-1 ~]# service crond restart
    [root@master-1 ~]# chkconfig crond on

    1.6 设置主机名
    #所有节点
    [root@master-1 ~]# cat > /etc/hosts <<EOF
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.91.18 master-1
    192.168.91.19 master-2
    192.168.91.20 master-3
    192.168.91.21 node-1
    192.168.91.22 node-2
    EOF

    1.7 设置免密码登录
    #从任意Master节点分发配置到其他所有的节点(包括其他的Master与Node)
    #本例中从master-1分发
    [root@master-1 ~]# yum install -y expect
    [root@master-1 ~]# ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa
    #密码更换
    [root@master-1 ~]# export mypass=123456s
    [root@master-1 ~]# name=(master-1 master-2 master-3 node-1 node-2)
    [root@master-1 ~]# for i in ${name[@]};do
    expect -c "
    spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$i
    expect {
    \"*yes/no*\" {send \"yes\r\"; exp_continue}
    \"*password*\" {send \"$mypass\r\"; exp_continue}
    \"*Password*\" {send \"$mypass\r\";}
    }"
    done
    #连接测试
    [root@master-1 ~]# ssh master-2

    1.8 优化内核参数
    #所有节点
    [root@master-1 ~]# modprobe br_netfilter
    [root@master-1 ~]# cat >>/etc/sysctl.conf<<EOF
    net.bridge.bridge-nf-call-iptables=1
    net.bridge.bridge-nf-call-ip6tables=1
    net.ipv4.ip_forward=1
    vm.swappiness=0
    fs.file-max=52706963
    fs.nr_open=52706963
    EOF


    #参数说明
    fs.file-max 系统级打开最大文件句柄的数量
    fs.nr_open linux系统中单个进程可打开的最大文件句柄数(进程级上限)

    #应用内核配置
    [root@master-1 ~]# sysctl -p


    1.9 高可用节点安装Keepalived
    #192.168.91.18
    [root@master-1 ~]# yum install -y keepalived


    #注意修改网卡地址与SLAVE节点的优先级
    [root@master-1 ~]# cat >/etc/keepalived/keepalived.conf <<EOL
    global_defs {
    router_id KUB_LVS
    }
    vrrp_script CheckMaster {
    script "curl -k https://192.168.91.254:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
    }
    vrrp_instance VI_1 {
    state MASTER
    interface ens32
    virtual_router_id 61
    priority 100
    advert_int 1
    nopreempt
    authentication {
    auth_type PASS
    auth_pass 111111
    }
    virtual_ipaddress {
    192.168.91.254/24 dev ens32
    }
    track_script {
    CheckMaster
    }
    }
    EOL

    #SLAVE
    #修改state为BACKUP, priority 为 90

    1.10 启动keepalived
    [root@master-1 ~]# systemctl enable keepalived && systemctl restart keepalived
    [root@master-1 ~]# service keepalived status


    2 配置证书
    配置cfssl用于创建证书
    #提示生成证书工具有三种:easyrsa openssl cfssl
    #master-1
    [root@master-1 ~]# mkdir /soft; cd /soft
    [root@master-1 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    [root@master-1 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    [root@master-1 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    [root@master-1 ~]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    [root@master-1 ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
    [root@master-1 ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    [root@master-1 ~]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

    2.1 生成ETCD证书
    #创建目录(Master-1)
    [root@master-1 ~]# mkdir /root/etcd && cd /root/etcd

    2.2.1 CA 证书配置(Master-1)
    [root@master-1 ~]# cat << EOF | tee ca-config.json
    {
    "signing": {
    "default": {
    "expiry": "87600h"
    },
    "profiles": {
    "www": {
    "expiry": "87600h",
    "usages": [
    "signing",
    "key encipherment",
    "server auth",
    "client auth"
    ]
    }
    }
    }
    }
    EOF
    # expiry 表示证书过期时间
    # 参数详解
    ca-config.json:可以定义多个 profiles,分别指定不同的过期时间、使用场景等参数;后续在签名证书时使用某个 profile;
    signing: 表示该证书可用于签名其它证书;生成的 ca.pem 证书中 CA=TRUE;
    server auth: 表示client可以用该 CA 对server提供的证书进行验证;
    client auth: 表示server可以用该CA对client提供的证书进行验证;


    2.2.2 创建CA证书请求文件(Master-1)
    [root@master-1 ~]# cat << EOF | tee ca-csr.json
    {
    "CN": "etcd CA",
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "L": "Beijing",
    "ST": "Beijing"
    }
    ]
    }
    EOF
    参数详解:
    CN:Common Name,kube-apiserver 从证书中提取该字段作为请求的用户名 (User Name);浏览器使用该字段验证网站是否合法;

    names中的字段:
    C : country,国家
    ST: state,州或省份
    L:location,城市
    O:organization,组织,kube-apiserver 从证书中提取该字段作为请求用户所属的组 (Group)
    OU:organization unit,组织单位

    2.2.3 创建ETCD证书请求文件
    #可以把所有的master IP 加入到csr文件中(Master-1)
    [root@master-1 ~]# cat << EOF | tee server-csr.json
    {
    "CN": "etcd",
    "hosts": [
    "master-1",
    "master-2",
    "master-3",
    "192.168.91.18",
    "192.168.91.19",
    "192.168.91.20"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "L": "Beijing",
    "ST": "Beijing"
    }
    ]
    }
    EOF
    #hosts包含的是授权范围,不在此范围的的节点或者服务使用此证书就会报证书不匹配错误. 为空或者""表示所有客户端的都可以使用

    2.2.4 生成 ETCD CA 证书和ETCD公私钥(Master-1)
    [root@master-1 ~]# cd /root/etcd/

    2.2.5
    #生成ca证书(Master-1)
    [root@master-1 ~]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    [root@master-1 etcd]# ll

    2.2.6
    #生成etcd证书(Master-1)
    [root@master-1 etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server


    # cfssl参数解释
    gencert: 生成新的key(密钥)和签名证书
    -initca:初始化一个新ca
    -ca:指明ca的证书
    -ca-key:指明ca的私钥文件
    -config:指明请求证书的json文件
    -profile:与-config中的profile对应,是指根据config中的profile段来生成证书的相关信息

    #查看证书
    [root@master-1 etcd]# ll


    2.2.7 #安装ETCD
    [root@master-1 soft]# tar xfv etcd-v3.4.7-linux-amd64.tar.gz
    [root@master-1 soft]# cd etcd-v3.4.7-linux-amd64
    [root@master-1 etcd-v3.4.7-linux-amd64]# cp etcd /usr/local/bin
    [root@master-1 etcd-v3.4.7-linux-amd64]# cp etcdctl /usr/local/bin
    [root@master-1 etcd-v3.4.7-linux-amd64]# chmod +x /usr/local/bin/etcd
    [root@master-1 etcd-v3.4.7-linux-amd64]# chmod +x /usr/local/bin/etcdctl

    2.2.8 #复制到其他的master节点
    [root@master-1 etcd-v3.4.7-linux-amd64]# for i in master-2 master-3;do scp /usr/local/bin/etcd* $i:/usr/local/bin/;done
    [root@master-1 ~]# mkdir -p /etc/etcd/{cfg,ssl}

    2.2.9 #其他master节点创建目录
    [root@master-1 etcd-v3.4.7-linux-amd64]# for i in master-2 master-3;do ssh $i "mkdir -p /etc/etcd/{cfg,ssl}";done

    2.3.0 #创建配置文件
    [root@master-1 ~]# cat >/etc/etcd/cfg/etcd.conf<<EOFL
    #[Member]
    ETCD_NAME="master-1"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.91.18:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.91.18:2379,http://192.168.91.18:2390"

    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.91.18:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.91.18:2379"
    ETCD_INITIAL_CLUSTER="master-1=https://192.168.91.18:2380,master-2=https://192.168.91.19:2380,master-3=https://192.168.91.20:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_ENABLE_V2="true"
    EOFL


    #参数说明:
    ETCD_NAME 节点名称, 如果有多个节点, 那么每个节点要修改为本节点的名称。
    ETCD_DATA_DIR 数据目录
    ETCD_LISTEN_PEER_URLS 集群通信监听地址
    ETCD_LISTEN_CLIENT_URLS 客户端访问监听地址
    ETCD_INITIAL_ADVERTISE_PEER_URLS 集群通告地址
    ETCD_ADVERTISE_CLIENT_URLS 客户端通告地址
    ETCD_INITIAL_CLUSTER 集群节点地址,如果多个节点那么逗号分隔
    ETCD_INITIAL_CLUSTER="master1=https://192.168.91.200:2380,master2=https://192.168.91.201:2380,master3=https://192.168.91.202:2380"
    ETCD_INITIAL_CLUSTER_TOKEN 集群Token
    ETCD_INITIAL_CLUSTER_STATE 加入集群的当前状态,new是新集群,existing表示加入已有集群


    2.3.1 创建ETCD的系统启动服务(所有master)
    报错:
    ETCD3.4版本会自动读取环境变量的参数,所以EnvironmentFile文件中有的参数,不需要再次在ExecStart启动参数中添加,二选一,
    如同时配置,会触发以下类似报错“etcd: conflicting environment variable "ETCD_NAME"
    is shadowed by corresponding command-line flag (either unset environment variable or disable flag)”


    [root@master-1 ~]# cat > /usr/lib/systemd/system/etcd.service<<EOFL
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target

    [Service]
    Type=notify
    EnvironmentFile=/etc/etcd/cfg/etcd.conf
    ExecStart=/usr/local/bin/etcd \
    --initial-cluster-state=new \
    --cert-file=/etc/etcd/ssl/server.pem \
    --key-file=/etc/etcd/ssl/server-key.pem \
    --peer-cert-file=/etc/etcd/ssl/server.pem \
    --peer-key-file=/etc/etcd/ssl/server-key.pem \
    --trusted-ca-file=/etc/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem
    Restart=on-failure
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target
    EOFL

    2.3.2 复制etcd证书到指定目录
    #此目录与之前的ETCD启动目录相一致
    #如果有多个Master节点, 那么需要复制到每个Master (从master-1复制到其他master节点)
    [root@master-1 ~]# mkdir -p /etc/etcd/ssl/
    [root@master-1 ~]# \cp /root/etcd/*pem /etc/etcd/ssl/ -rf

    2.3.3 #复制etcd证书到每个节点
    [root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do ssh $i mkdir -p /etc/etcd/{cfg,ssl};done
    [root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do scp /etc/etcd/ssl/* $i:/etc/etcd/ssl/;done

    2.3.4 启动etcd (所有节点)
    [root@master-1 ~]# chkconfig etcd on
    [root@master-1 ~]# service etcd start
    [root@master-1 ~]# service etcd status


    2.3.5 #debug 模式启动测试
    /usr/local/bin/etcd \
    --name=master-1 \
    --data-dir=/var/lib/etcd/default.etcd \
    --listen-peer-urls=https://192.168.91.18:2380 \
    --listen-client-urls=https://192.168.91.18:2379,http://192.168.91.18:2390 \
    --advertise-client-urls=https://192.168.91.18:2379 \
    --initial-advertise-peer-urls=https://192.168.91.18:2380 \
    --initial-cluster=master-1=https://192.168.91.18:2380,master-2=https://192.168.91.19:2380,master-3=https://192.168.91.20:2380 \
    --initial-cluster-token=etcd-cluster \
    --initial-cluster-state=new \
    --cert-file=/etc/etcd/ssl/server.pem \
    --key-file=/etc/etcd/ssl/server-key.pem \
    --peer-cert-file=/etc/etcd/ssl/server.pem \
    --peer-key-file=/etc/etcd/ssl/server-key.pem \
    --trusted-ca-file=/etc/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem


    2.3.6 #集群状态检查
    [root@master-2 ~]# etcdctl --cacert=/etc/etcd/ssl/ca.pem \
    --cert=/etc/etcd/ssl/server.pem \
    --key=/etc/etcd/ssl/server-key.pem \
    --endpoints="https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379" \
    endpoint health


    #返回结果
    https://192.168.91.18:2379 is healthy: successfully committed proposal: took = 13.39398ms
    https://192.168.91.19:2379 is healthy: successfully committed proposal: took = 20.742964ms
    https://192.168.91.20:2379 is healthy: successfully committed proposal: took = 21.769598ms

    2.3.7 设置flannel 网段
    ETCDCTL_API=2 etcdctl \
    --endpoints="https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379" \
    --ca-file=/etc/etcd/ssl/ca.pem \
    --key-file=/etc/etcd/ssl/server-key.pem \
    --cert-file=/etc/etcd/ssl/server.pem \
    set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan","Directrouting": true}}'


    2.3.8 查看设置flannel 网段
    ETCDCTL_API=2 etcdctl --ca-file=/etc/etcd/ssl/ca.pem \
    --cert-file=/etc/etcd/ssl/server.pem --key-file=/etc/etcd/ssl/server-key.pem \
    --endpoints="https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379" \
    ls /coreos.com/network/config

    #输出以下内容, 表示正常
    /coreos.com/network/config


    3.0 生成kubernetes 证书
    #创建 Kubernetes 相关证书
    #此证书用于Kubernetes节点直接的通信, 与之前的ETCD证书不同. (Master-1)
    [root@master-1 ~]# mkdir /root/kubernetes/ && cd /root/kubernetes/

    3.1 配置ca 文件(Master-1)
    [root@master-1 ~]# cat << EOF | tee ca-config.json
    {
    "signing": {
    "default": {
    "expiry": "87600h"
    },
    "profiles": {
    "kubernetes": {
    "expiry": "87600h",
    "usages": [
    "signing",
    "key encipherment",
    "server auth",
    "client auth"
    ]
    }
    }
    }
    }
    EOF

    3.2 创建ca证书申请文件(Master-1)
    [root@master-1 ~]# cat << EOF | tee ca-csr.json
    {
    "CN": "kubernetes",
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "L": "Beijing",
    "ST": "Beijing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF

    #证书字段意义
    CN=commonName (网站域名)
    OU=organizationUnit (组织部门名)
    O=organizationName (组织名)
    L=localityName (城市)
    S=stateName (省份)
    C=country (国家)

    3.3 生成API SERVER证书申请文件(Master-1)
    #注意要修改VIP的地址
    [root@master-1 ~]# cat << EOF | tee server-csr.json
    {
    "CN": "kubernetes",
    "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "10.0.0.2",
    "192.168.91.18",
    "192.168.91.19",
    "192.168.91.20",
    "192.168.91.21",
    "192.168.91.22",
    "192.168.91.254",
    "master-1",
    "master-2",
    "master-3",
    "node-1",
    "node-2",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "L": "Beijing",
    "ST": "Beijing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF

    3.4 创建 Kubernetes Proxy 证书申请文件(Master-1)
    [root@master-1 ~]# cat << EOF | tee kube-proxy-csr.json
    {
    "CN": "system:kube-proxy",
    "hosts": [],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "L": "Beijing",
    "ST": "Beijing",
    "O": "k8s",
    "OU": "System"
    }
    ]
    }
    EOF

    3.5 生成 kubernetes CA 证书和公私钥
    # 生成ca证书(Master-1)
    [root@master-1 ~]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

    3.5.1 # 生成 api-server 证书(Master-1)
    [root@master-1 ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
    -config=ca-config.json \
    -profile=kubernetes server-csr.json | cfssljson -bare server

    # cfssl参数
    gencert: 生成新的key(密钥)和签名证书
    -initca:初始化一个新ca
    -ca:指明ca的证书
    -ca-key:指明ca的私钥文件
    -config:指明请求证书的json文件
    -profile:与-config中的profile对应,是指根据config中的profile段来生成证书的相关信息

    3.5.2 # 生成 kube-proxy 证书(Master-1)
    [root@master-1 ~]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
    -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

    4 安装 Docker
    #安装CE版本
    [root@node-1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
    [root@node-1 ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    [root@node-1 ~]# yum install -y docker-ce-19.03.6 docker-ce-cli-19.03.6 containerd.io

    #如果安装比较慢,使用本地安装
    [root@node-1 ~]# yum localinstall containerd.io-1.2.10-3.2.el7.x86_64.rpm \
    docker-ce-19.03.6-3.el7.x86_64.rpm \
    docker-ce-cli-19.03.6-3.el7.x86_64.rpm


    4.1 启动Docker服务
    [root@node-1 ~]# chkconfig docker on
    [root@node-1 ~]# service docker start
    [root@node-1 ~]# service docker status

    4.2 配置镜像加速器(所有node节点)
    [root@node-1 ~]# mkdir -p /etc/docker
    [root@node-1 ~]# tee /etc/docker/daemon.json <<-'EOF'
    {
    "registry-mirrors": ["https://plqjafsr.mirror.aliyuncs.com"]
    }
    EOF

    #重启docker
    [root@node-1 ~]# systemctl daemon-reload
    [root@node-1 ~]# systemctl restart docker

    5 安装flannel
    5.1 下载Flannel二进制包
    #所有的节点,下载到master-1
    [root@master-1 ~]# mkdir /soft ; cd /soft
    [root@master-1 ~]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
    [root@master-1 ~]# tar xvf flannel-v0.11.0-linux-amd64.tar.gz
    [root@master-1 ~]# mv flanneld mk-docker-opts.sh /usr/local/bin/

    #复制flanneld到其他的所有节点
    [root@master-1 ~]# for i in master-2 master-3 node-1 node-2;do scp /usr/local/bin/mk-docker-opts.sh /usr/local/bin/flanneld $i:/usr/local/bin/;done

    5.2 配置Flannel (所有节点)
    [root@node-1 ~]# mkdir -p /etc/flannel
    [root@node-1 ~]# cat > /etc/flannel/flannel.cfg<<EOF
    FLANNEL_OPTIONS="-etcd-endpoints=https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379 -etcd-cafile=/etc/etcd/ssl/ca.pem -etcd-certfile=/etc/etcd/ssl/server.pem -etcd-keyfile=/etc/etcd/ssl/server-key.pem"
    EOF

    5.3 配置flannel 配置文件
    cat > /usr/lib/systemd/system/flanneld.service <<EOF
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service

    [Service]
    Type=notify
    EnvironmentFile=/etc/flannel/flannel.cfg
    ExecStart=/usr/local/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
    ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOF


    5.4 启动Flannel
    [root@node-1 ~]# service flanneld start
    [root@node-1 ~]# chkconfig flanneld on
    [root@node-2 ~]# service flanneld status


    5.5#node节点停止flanneld
    [root@node-1 ~]# service flanneld stop

    5.6 修改docker 配置文件(所有node节点)
    [root@node-1 ~]# cat >/usr/lib/systemd/system/docker.service<<EOFL
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    After=network-online.target firewalld.service
    Wants=network-online.target

    [Service]
    Type=notify
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
    ExecReload=/bin/kill -s HUP \$MAINPID
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    TimeoutStartSec=0
    Delegate=yes
    KillMode=process
    Restart=on-failure
    StartLimitBurst=3
    StartLimitInterval=60s

    [Install]
    WantedBy=multi-user.target
    EOFL

    5.7 重启Docker服务
    [root@node-1 ~]# systemctl daemon-reload
    [root@node-1 ~]# service flanneld restart
    [root@node-1 ~]# service docker restart

    6 安装Master组件
    6.1 安装Api Server服务
    6.1.1 下载Kubernetes二进制包(1.18.3)(master-1)
    [root@master-1 soft]# cd /soft
    [root@master-1 soft]# tar xvf kubernetes-server-linux-amd64.tar.gz
    [root@master-1 soft]# cd kubernetes/server/bin/
    [root@master-1 soft]# cp kube-scheduler kube-apiserver kube-controller-manager kubectl /usr/local/bin/

    #复制执行文件到其他的master节点
    [root@master-1 bin]# for i in master-2 master-3;do scp /usr/local/bin/kube* $i:/usr/local/bin/;done

    6.1.2 配置Kubernetes证书
    #Kubernetes各个组件之间通信需要证书,需要复制个每个master节点(master-1)
    [root@master-1 soft]# mkdir -p /etc/kubernetes/{cfg,ssl}
    [root@master-1 soft]# cp /root/kubernetes/*.pem /etc/kubernetes/ssl/

    #复制到其他的节点
    [root@master-1 soft]# for i in master-2 master-3 node-1 node-2;do ssh $i mkdir -p /etc/kubernetes/{cfg,ssl};done
    [root@master-1 soft]# for i in master-2 master-3 node-1 node-2;do scp /etc/kubernetes/ssl/* $i:/etc/kubernetes/ssl/;done
    [root@master-1 bin]# for i in master-2 master-3 node-1 node-2;do echo $i "---------->"; ssh $i ls /etc/kubernetes/ssl;done

    6.1.4 编辑Token 文件(master-1)
    #f89a76f197526a0d4bc2bf9c86e871c3:随机字符串,自定义生成; kubelet-bootstrap:用户名; 10001:UID; system:kubelet-bootstrap:用户组
    [root@master-1 soft]# vim /etc/kubernetes/cfg/token.csv
    f89a76f197526a0d4bc2bf9c86e871c3,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

    #复制到其他的master节点
    [root@master-1 bin]# for i in master-2 master-3;do scp /etc/kubernetes/cfg/token.csv $i:/etc/kubernetes/cfg/token.csv;done

    #配置启动文件
    [root@master-1 soft]# cat >/etc/kubernetes/cfg/kube-apiserver.cfg <<EOFL
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --insecure-bind-address=0.0.0.0 \
    --insecure-port=8080 \
    --etcd-servers=https://192.168.91.18:2379,https://192.168.91.19:2379,https://192.168.91.20:2379 \
    --bind-address=0.0.0.0 \
    --secure-port=6443 \
    --advertise-address=0.0.0.0 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/etc/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/etc/kubernetes/ssl/server.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/etc/etcd/ssl/ca.pem \
    --etcd-certfile=/etc/etcd/ssl/server.pem \
    --etcd-keyfile=/etc/etcd/ssl/server-key.pem"
    EOFL

    #设置启动
    cat >/usr/lib/systemd/system/kube-apiserver.service<<EOFL
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.cfg
    ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOFL

    #配置自动启动
    [root@master-1 soft]# service kube-apiserver start
    [root@master-1 soft]# chkconfig kube-apiserver on


    6.2 部署kube-scheduler 服务
    #创建kube-scheduler配置文件(所有的master节点)
    [root@master-1 soft]# cat >/etc/kubernetes/cfg/kube-scheduler.cfg<<EOFL
    KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --bind-address=0.0.0.0 --master=127.0.0.1:8080 --leader-elect"
    EOFL

    6.3 设置启动文件
    [root@master-1 soft]# cat >/usr/lib/systemd/system/kube-scheduler.service<<EOFL
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.cfg
    ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOFL

    6.2.2 启动kube-scheduler服务(所有的master节点)
    [root@master-1 soft]# service kube-scheduler restart
    [root@master-1 soft]# chkconfig kube-scheduler on


    6.3 部署kube-controller-manager
    6.3.1创建kube-controller-manager配置文件(所有节点)
    [root@master-1 bin]# cat >/etc/kubernetes/cfg/kube-controller-manager.cfg<<EOFL
    KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
    --v=4 \
    --master=127.0.0.1:8080 \
    --leader-elect=true \
    --address=0.0.0.0 \
    --service-cluster-ip-range=10.0.0.0/24 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --root-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem"
    EOFL


    6.3.2 创建kube-controller-manager 启动文件
    [root@master-1 bin]# cat >/usr/lib/systemd/system/kube-controller-manager.service<<EOFL
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.cfg
    ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOFL


    6.3.3启动kube-controller-manager服务
    [root@master-1 bin]# chkconfig kube-controller-manager on
    [root@master-1 bin]# service kube-controller-manager start
    [root@master-2 ~]# service kube-controller-manager status


    7.1.1 从Master节点复制Kubernetes 文件到Node
    #配置Node节点
    [root@master-1 bin]#cd /soft
    [root@master-1 bin]# for i in node-1 node-2;do scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy $i:/usr/local/bin/;done


    #Maste-1节点
    [root@master-1 bin]# mkdir /root/config ; cd /root/config
    [root@master-1 bin]# cat >environment.sh<<EOFL
    # 创建kubelet bootstrapping kubeconfig
    BOOTSTRAP_TOKEN=f89a76f197526a0d4bc2bf9c86e871c3
    KUBE_APISERVER="https://192.168.91.254:6443"
    # 设置集群参数
    kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --embed-certs=true \
    --server=\${KUBE_APISERVER} \
    --kubeconfig=bootstrap.kubeconfig
    # 设置客户端认证参数
    kubectl config set-credentials kubelet-bootstrap \
    --token=\${BOOTSTRAP_TOKEN} \
    --kubeconfig=bootstrap.kubeconfig
    # 设置上下文参数
    kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=bootstrap.kubeconfig
    # 设置默认上下文
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    #通过 bash environment.sh获取 bootstrap.kubeconfig 配置文件。
    EOFL

    #执行脚本
    [root@master-1 bin]# sh environment.sh

    10.1.3创建kube-proxy kubeconfig文件 (master-1)
    [root@master-1 bin]# cat >env_proxy.sh<<EOF
    # 创建kube-proxy kubeconfig文件
    BOOTSTRAP_TOKEN=f89a76f197526a0d4bc2bf9c86e871c3
    KUBE_APISERVER="https://192.168.91.254:6443"

    kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --embed-certs=true \
    --server=\${KUBE_APISERVER} \
    --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-credentials kube-proxy \
    --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
    --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-context default \
    --cluster=kubernetes \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig

    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    EOF

    #执行脚本
    [root@master-1 bin]# sh env_proxy.sh


    10.1.4 复制kubeconfig文件与证书到所有Node节点
    #将bootstrap kubeconfig kube-proxy.kubeconfig 文件复制到所有Node节点
    #远程创建目录 (master-1)
    [root@master-1 bin]# for i in node-1 node-2;do ssh $i "mkdir -p /etc/kubernetes/{cfg,ssl}";done

    #复制证书文件ssl (master-1)
    [root@master-1 config]# for i in node-1 node-2;do scp /etc/kubernetes/ssl/* $i:/etc/kubernetes/ssl/;done


    #复制kubeconfig文件 (master-1)
    [root@master-1 bin]# cd /root/config
    [root@master-1 config]# for i in node-1 node-2;do scp -rp bootstrap.kubeconfig kube-proxy.kubeconfig $i:/etc/kubernetes/cfg/;done


    10.1.5 创建kubelet参数配置文件
    #不同的Node节点, 需要修改IP地址 (node节点操作)
    [root@node-1 bin]# cat >/etc/kubernetes/cfg/kubelet.config<<EOF
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 192.168.91.21
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
    anonymous:
    enabled: true
    EOF

    [root@node-1 bin]#cat >/etc/kubernetes/cfg/kubelet<<EOF
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=192.168.91.21 \
    --kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/etc/kubernetes/cfg/kubelet.config \
    --cert-dir=/etc/kubernetes/ssl \
    --pod-infra-container-image=docker.io/kubernetes/pause:latest"
    EOF


    cat >/usr/lib/systemd/system/kubelet.service<<EOF
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service

    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kubelet
    ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
    Restart=on-failure
    KillMode=process

    [Install]
    WantedBy=multi-user.target
    EOF


    10.1.8 将kubelet-bootstrap用户绑定到系统集群角色
    #master-1节点操作
    [root@master-1 bin]#kubectl create clusterrolebinding kubelet-bootstrap \
    --clusterrole=system:node-bootstrapper \
    --user=kubelet-bootstrap


    10.1.9 启动kubelet服务(node节点)
    [root@node-1 bin]#chkconfig kubelet on
    [root@node-1 bin]#service kubelet start
    [root@node-1 bin]#service kubelet status


    10.2.1 批准请求
    #Master节点操作
    [root@master-1 bin]#kubectl certificate approve node-csr-4_tHtI9Y1ZOd1V3ZF5URGT7bWuRZWOizZYgeaBiAHOY
    [root@master-1 bin]#kubectl certificate approve node-csr-bvq5buFKqAMvdJWOUjjP7hdez3xkQq5DPC4nNIL2vQs

    10.4.1 创建kube-proxy配置文件
    #注意修改hostname-override地址, 不同的节点则不同。
    [root@node-1 ~]#cat >/etc/kubernetes/cfg/kube-proxy<<EOF
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --metrics-bind-address=0.0.0.0 \
    --hostname-override=192.168.91.21 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/etc/kubernetes/cfg/kube-proxy.kubeconfig"
    EOF


    10.4.2 创建kube-proxy systemd unit 文件
    [root@node-1 ~]#cat >/usr/lib/systemd/system/kube-proxy.service<<EOF
    [Unit]
    Description=Kubernetes Proxy
    After=network.target

    [Service]
    EnvironmentFile=/etc/kubernetes/cfg/kube-proxy
    ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    EOF


    10.4.3 启动kube-proxy 服务
    [root@node-1 ~]#chkconfig kube-proxy on
    [root@node-1 ~]#service kube-proxy start
    [root@node-1 ~]#service kube-proxy status


    运行Demo项目
    [root@master-1 config]# kubectl create deployment nginx-demo --image=nginx
    #扩容
    [root@master-1 config]# kubectl scale deployment nginx-demo --replicas 2

    #获取节点
    [root@master-1 config]# kubectl get pods -o wide

    [root@master-1 config]# kubectl expose deployment nginx-demo --port=88 --target-port=80 --type=NodePort
    [root@master-1 config]# kubectl get svc
    NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
    kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 43m
    nginx-demo NodePort 10.0.0.207 <none> 88:46965/TCP 3s

    #访问服务
    [root@master-1 config]# curl 192.168.91.21:46965
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    <style>
    body {
    width: 35em;
    margin: 0 auto;
    font-family: Tahoma, Verdana, Arial, sans-serif;
    }
    </style>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and
    working. Further configuration is required.</p>

    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>

    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>

    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>

    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>

  • 相关阅读:
    HTTP常用的动词
    mysql5.7安装与主从复制
    Linq to XML 之XElement的Descendants方法的新发现
    SQL中的内连接外连接和交叉连接是什么意思?
    LINQ TO SQL ——Group by
    分布式Web服务器架构
    基于socket的客户端和服务端聊天机器人
    linq to sql (Group By/Having/Count/Sum/Min/Max/Avg操作符)
    关于301、302、303重定向的那些事
    手写async await的最简实现
  • 原文地址:https://www.cnblogs.com/gaoyuechen/p/16578546.html
Copyright © 2020-2023  润新知