• 03 - Kubernetes Binary Installation


    Binary installation of Kubernetes for the department's internal development environment:
    cluster deployment of kubernetes-1.13.1 + etcd-3.3.10 + flanneld-0.10

    1. Prerequisites:
    Hosts used for the K8S deployment:
    172.16.14.110 k8s-master
    172.16.14.112 k8s-node1
    172.16.14.115 k8s-node2

    # Set the hostname on each of the three hosts
    hostnamectl set-hostname k8s-master
    hostnamectl set-hostname k8s-node1
    hostnamectl set-hostname k8s-node2

    # Update the hosts file
    cat >>/etc/hosts<<EOF
    172.16.14.110 k8s-master
    172.16.14.112 k8s-node1
    172.16.14.115 k8s-node2
    EOF

    # Disable the firewall and SELinux:
    systemctl stop firewalld
    systemctl disable firewalld
    setenforce 0
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux

    # Create the /etc/sysctl.d/k8s.conf file
    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    vm.swappiness = 0
    EOF

    # Load the br_netfilter module and apply the settings
    modprobe br_netfilter
    sysctl -p /etc/sysctl.d/k8s.conf

    # 1.2 Prerequisites for enabling IPVS in kube-proxy:
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

    # Install the ipset package; to make it easy to inspect IPVS proxy rules, also install the ipvsadm management tool:
    yum install ipvsadm ipset sysstat conntrack libseccomp -y
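
    Once kube-proxy is later started in ipvs mode (--proxy-mode=ipvs; this guide's kube-proxy config leaves the default), the rules it programs can be inspected like this:
    # List IPVS virtual servers and their real-server backends
    ipvsadm -Ln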

    # Disable system swap:
    swapoff -a && sysctl -w vm.swappiness=0
    sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

    # Before upgrading the kernel, make sure perl is installed
    [ ! -f /usr/bin/perl ] && yum install perl -y

    # Base system configuration
    # Recommended: at least 2G RAM / 30G disk
    # Minimal install of Ubuntu 16.04 server or CentOS 7 Minimal
    # Configure basic networking, update the package mirrors, enable SSH login, etc.

    # Configure passwordless SSH login on the deploy node
    ssh-keygen -t rsa -b 2048   # press Enter three times to accept the defaults
    ssh-copy-id $IPs  # $IPs is each node address, including this host; answer yes and enter the root password when prompted
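
    A minimal loop over the three hosts in the table above (assuming root login with a password is allowed):
    for ip in 172.16.14.110 172.16.14.112 172.16.14.115; do
        ssh-copy-id root@$ip
    done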


    2.1 Download the software
    wget https://dl.k8s.io/v1.13.1/kubernetes-server-linux-amd64.tar.gz
    wget https://dl.k8s.io/v1.13.1/kubernetes-client-linux-amd64.tar.gz
    wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
    wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz

    2.2 Install cfssl
    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
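
    A quick sanity check that the cfssl tools are installed and on the PATH:
    cfssl version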

    2.3 Create the etcd certificates
    mkdir /k8s/etcd/{bin,cfg,ssl} -p
    mkdir /k8s/kubernetes/{bin,cfg,ssl} -p
    cd /k8s/etcd/ssl/

    2.4 etcd CA configuration
    cat << EOF | tee ca-config.json
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "etcd": {
            "expiry": "87600h",
            "usages": [
              "signing",
              "key encipherment",
              "server auth",
              "client auth"
            ]
          }
        }
      }
    }
    EOF

    2.5 etcd CA certificate signing request
    cat << EOF | tee ca-csr.json
    {
      "CN": "etcd CA",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing"
        }
      ]
    }
    EOF

    2.6 etcd server certificate signing request
    cat << EOF | tee server-csr.json
    {
      "CN": "etcd",
      "hosts": [
        "172.16.14.110",
        "172.16.14.112",
        "172.16.14.115"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing"
        }
      ]
    }
    EOF

    2.7 Generate the etcd CA certificate and private key (initialize the CA)
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    [root@elasticsearch01 ssl]# ls
    ca-config.json ca-csr.json server-csr.json
    [root@elasticsearch01 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    2018/12/26 16:13:54 [INFO] generating a new CA key and certificate from CSR
    2018/12/26 16:13:54 [INFO] generate received request
    2018/12/26 16:13:54 [INFO] received CSR
    2018/12/26 16:13:54 [INFO] generating key: rsa-2048
    2018/12/26 16:13:54 [INFO] encoded CSR
    2018/12/26 16:13:54 [INFO] signed certificate with serial number 144752911121073185391033754516204538929473929443
    [root@elasticsearch01 ssl]# ls
    ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem server-csr.json

    2.8 Generate the server certificate
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd server-csr.json | cfssljson -bare server
    [root@elasticsearch01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd server-csr.json | cfssljson -bare server
    2018/12/26 16:18:53 [INFO] generate received request
    2018/12/26 16:18:53 [INFO] received CSR
    2018/12/26 16:18:53 [INFO] generating key: rsa-2048
    2018/12/26 16:18:54 [INFO] encoded CSR
    2018/12/26 16:18:54 [INFO] signed certificate with serial number 388122587040599986639159163167557684970159030057
    2018/12/26 16:18:54 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for websites.
    For more information see the Baseline Requirements for the Issuance and Management of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@elasticsearch01 ssl]# ls
    ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem server.csr server-csr.json server-key.pem server.pem
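
    To double-check the hosts (SANs) baked into the new server certificate before distributing it, cfssl-certinfo can decode it:
    cfssl-certinfo -cert server.pem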

    2.9 Install etcd
    1) Unpack
    tar -xvf etcd-v3.3.10-linux-amd64.tar.gz
    cd etcd-v3.3.10-linux-amd64/
    cp etcd etcdctl /k8s/etcd/bin/
    2) Create the main etcd configuration file
    vim /k8s/etcd/cfg/etcd.conf
    # [Member]
    ETCD_NAME="etcd01"
    ETCD_DATA_DIR="/data1/etcd"
    ETCD_LISTEN_PEER_URLS="https://172.16.14.110:2380"
    ETCD_LISTEN_CLIENT_URLS="https://172.16.14.110:2379"

    # [Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.14.110:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://172.16.14.110:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://172.16.14.110:2380,etcd02=https://172.16.14.112:2380,etcd03=https://172.16.14.115:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"

    # [Security]
    ETCD_CERT_FILE="/k8s/etcd/ssl/server.pem"
    ETCD_KEY_FILE="/k8s/etcd/ssl/server-key.pem"
    ETCD_TRUSTED_CA_FILE="/k8s/etcd/ssl/ca.pem"
    ETCD_CLIENT_CERT_AUTH="true"
    ETCD_PEER_CERT_FILE="/k8s/etcd/ssl/server.pem"
    ETCD_PEER_KEY_FILE="/k8s/etcd/ssl/server-key.pem"
    ETCD_PEER_TRUSTED_CA_FILE="/k8s/etcd/ssl/ca.pem"
    ETCD_PEER_CLIENT_CERT_AUTH="true"

    3) Create the etcd systemd unit file
    mkdir /data1/etcd
    vim /usr/lib/systemd/system/etcd.service
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target

    [Service]
    Type=notify
    WorkingDirectory=/data1/etcd/
    EnvironmentFile=-/k8s/etcd/cfg/etcd.conf
    # set GOMAXPROCS to number of processors
    ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /k8s/etcd/bin/etcd --name="${ETCD_NAME}" --data-dir="${ETCD_DATA_DIR}" --listen-client-urls="${ETCD_LISTEN_CLIENT_URLS}" --listen-peer-urls="${ETCD_LISTEN_PEER_URLS}" --advertise-client-urls="${ETCD_ADVERTISE_CLIENT_URLS}" --initial-cluster-token="${ETCD_INITIAL_CLUSTER_TOKEN}" --initial-cluster="${ETCD_INITIAL_CLUSTER}" --initial-cluster-state="${ETCD_INITIAL_CLUSTER_STATE}" --cert-file="${ETCD_CERT_FILE}" --key-file="${ETCD_KEY_FILE}" --trusted-ca-file="${ETCD_TRUSTED_CA_FILE}" --client-cert-auth="${ETCD_CLIENT_CERT_AUTH}" --peer-cert-file="${ETCD_PEER_CERT_FILE}" --peer-key-file="${ETCD_PEER_KEY_FILE}" --peer-trusted-ca-file="${ETCD_PEER_TRUSTED_CA_FILE}" --peer-client-cert-auth="${ETCD_PEER_CLIENT_CERT_AUTH}""
    Restart=on-failure
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target

    4) Start the service. Note: before starting, apply the same configuration on etcd02 and etcd03 (adjusting ETCD_NAME and the listen/advertise URLs to each node's IP).
    Use scp to sync the certificates under /k8s/etcd/ssl/ to the other nodes:
    scp /k8s/etcd/ssl/* 172.16.14.112:/k8s/etcd/ssl/
    scp /k8s/etcd/ssl/* 172.16.14.115:/k8s/etcd/ssl/

    systemctl daemon-reload
    systemctl enable etcd
    systemctl start etcd

    5) Check the service
    [root@k8s-master ssl]# /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://172.16.14.110:2379,https://172.16.14.112:2379,https://172.16.14.115:2379" cluster-health
    member 68618120123ab77 is healthy: got healthy result from https://172.16.14.112:2379
    member a551c491a4bd4c8d is healthy: got healthy result from https://172.16.14.110:2379
    member c3fb58e1a5f59c7c is healthy: got healthy result from https://172.16.14.115:2379
    cluster is healthy
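
    Beyond cluster-health, a simple write/read round-trip confirms the TLS client path end to end (the /test key is a throwaway example):
    /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://172.16.14.110:2379" set /test ok
    /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://172.16.14.110:2379" get /test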

    3.0 Generate the Kubernetes certificates and private keys
    1) Create the Kubernetes CA certificate
    cd /k8s/kubernetes/ssl
    cat << EOF | tee ca-config.json
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
            "expiry": "87600h",
            "usages": [
              "signing",
              "key encipherment",
              "server auth",
              "client auth"
            ]
          }
        }
      }
    }
    EOF
    cat << EOF | tee ca-csr.json
    {
      "CN": "kubernetes",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    [root@elasticsearch01 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    2018/12/27 09:47:08 [INFO] generating a new CA key and certificate from CSR
    2018/12/27 09:47:08 [INFO] generate received request
    2018/12/27 09:47:08 [INFO] received CSR
    2018/12/27 09:47:08 [INFO] generating key: rsa-2048
    2018/12/27 09:47:08 [INFO] encoded CSR
    2018/12/27 09:47:08 [INFO] signed certificate with serial number 156611735285008649323551446985295933852737436614
    [root@elasticsearch01 ssl]# ls
    ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem

    2) Create the apiserver certificate (the original CSR here reused CN "etcd" by mistake; the apiserver certificate should use CN "kubernetes", and its hosts list should include the first service IP and the kubernetes service DNS names so in-cluster clients can validate it)
    cat << EOF | tee server-csr.json
    {
      "CN": "kubernetes",
      "hosts": [
        "10.254.0.1",
        "127.0.0.1",
        "172.16.14.110",
        "172.16.14.112",
        "172.16.14.115",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster.local"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing"
        }
      ]
    }
    EOF

    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
    [root@elasticsearch01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
    2018/12/27 09:51:56 [INFO] generate received request
    2018/12/27 09:51:56 [INFO] received CSR
    2018/12/27 09:51:56 [INFO] generating key: rsa-2048
    2018/12/27 09:51:56 [INFO] encoded CSR
    2018/12/27 09:51:56 [INFO] signed certificate with serial number 399376216731194654868387199081648887334508501005
    2018/12/27 09:51:56 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@elasticsearch01 ssl]# ls
    ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem server.csr server-csr.json server-key.pem server.pem

    3) Create the kube-proxy certificate
    cat << EOF | tee kube-proxy-csr.json
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Beijing",
          "ST": "Beijing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF

    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    [root@elasticsearch01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    2018/12/27 09:52:40 [INFO] generate received request
    2018/12/27 09:52:40 [INFO] received CSR
    2018/12/27 09:52:40 [INFO] generating key: rsa-2048
    2018/12/27 09:52:40 [INFO] encoded CSR
    2018/12/27 09:52:40 [INFO] signed certificate with serial number 633932731787505365511506755558794469389165123417
    2018/12/27 09:52:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@elasticsearch01 ssl]# ls
    ca-config.json ca-csr.json ca.pem kube-proxy-csr.json kube-proxy.pem server-csr.json server.pem
    ca.csr ca-key.pem kube-proxy.csr kube-proxy-key.pem server.csr server-key.pem

    3.1 Deploy the Kubernetes server (master) components
    1) Unpack the files
    tar -zxvf kubernetes-server-linux-amd64.tar.gz
    cd kubernetes/server/bin/
    cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/kubernetes/bin/
    2) Deploy the kube-apiserver component. First create the TLS bootstrapping token:
    [root@elasticsearch01 bin]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
    f2c50331f07be89278acdaf341ff1ecc

    vim /k8s/kubernetes/cfg/token.csv
    f2c50331f07be89278acdaf341ff1ecc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
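
    Equivalently, the token can be generated and written in one step (a sketch; the CSV format is token,user,uid,"group"). Whatever token ends up here must be reused as BOOTSTRAP_TOKEN in the section 4.2 script:
    BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    cat > /k8s/kubernetes/cfg/token.csv <<EOF
    ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF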
    Create the apiserver configuration file
    vim /k8s/kubernetes/cfg/kube-apiserver
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --etcd-servers=https://172.16.14.110:2379,https://172.16.14.112:2379,https://172.16.14.115:2379 \
    --bind-address=172.16.14.110 \
    --secure-port=6443 \
    --advertise-address=172.16.14.110 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.254.0.0/16 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/k8s/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/k8s/kubernetes/ssl/server.pem \
    --tls-private-key-file=/k8s/kubernetes/ssl/server-key.pem \
    --client-ca-file=/k8s/kubernetes/ssl/ca.pem \
    --service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/k8s/etcd/ssl/ca.pem \
    --etcd-certfile=/k8s/etcd/ssl/server.pem \
    --etcd-keyfile=/k8s/etcd/ssl/server-key.pem"

    Create the apiserver systemd unit file
    vim /usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=-/k8s/kubernetes/cfg/kube-apiserver
    ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target

    Start the service
    systemctl daemon-reload
    systemctl enable kube-apiserver
    systemctl start kube-apiserver
    [root@elasticsearch01 bin]# systemctl status kube-apiserver
    ● kube-apiserver.service - Kubernetes API Server
    Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
    Active: active (running) since Thu 2018-12-27 14:41:22 CST; 20s ago
    Docs: https://github.com/kubernetes/kubernetes
    Main PID: 22060 (kube-apiserver)
    CGroup: /system.slice/kube-apiserver.service
    └─22060 /k8s/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://10.2.8.44:2379,https://10.2....


    [root@elasticsearch01 bin]# ps -ef |grep kube-apiserver
    root 22060 1 5 14:41 ? 00:00:14 /k8s/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://10.2.8.44:2379,https://10.2.8.65:2379,https://10.2.8.34:2379 --bind-address=10.2.8.44 --secure-port=6443 --advertise-address=10.2.8.44 --allow-privileged=true --service-cluster-ip-range=10.254.0.0/16 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth --token-auth-file=/k8s/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/k8s/kubernetes/ssl/server.pem --tls-private-key-file=/k8s/kubernetes/ssl/server-key.pem --client-ca-file=/k8s/kubernetes/ssl/ca.pem --service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem --etcd-cafile=/k8s/etcd/ssl/ca.pem --etcd-certfile=/k8s/etcd/ssl/server.pem --etcd-keyfile=/k8s/etcd/ssl/server-key.pem

    [root@elasticsearch01 bin]# netstat -tulpn |grep kube-apiserve
    tcp 0 0 10.2.8.44:6443 0.0.0.0:* LISTEN 22060/kube-apiserve
    tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 22060/kube-apiserve
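
    As an extra check, the local insecure port should answer health probes with "ok":
    curl http://127.0.0.1:8080/healthz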

    3) Deploy the kube-scheduler component. Create the kube-scheduler configuration file:
    vim /k8s/kubernetes/cfg/kube-scheduler
    KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"

    Create the kube-scheduler systemd unit file
    vim /usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=-/k8s/kubernetes/cfg/kube-scheduler
    ExecStart=/k8s/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target

    Start the service
    systemctl daemon-reload
    systemctl enable kube-scheduler.service
    systemctl start kube-scheduler.service
    [root@elasticsearch01 bin]# systemctl status kube-scheduler.service
    ● kube-scheduler.service - Kubernetes Scheduler
    Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
    Active: active (running) since Thu 2018-12-27 15:16:51 CST; 17s ago
    Docs: https://github.com/kubernetes/kubernetes
    Main PID: 29026 (kube-scheduler)
    CGroup: /system.slice/kube-scheduler.service
    └─29026 /k8s/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect

    4) Deploy the kube-controller-manager component. Create the kube-controller-manager configuration file:
    vim /k8s/kubernetes/cfg/kube-controller-manager
    KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
    --v=4 \
    --master=127.0.0.1:8080 \
    --leader-elect=true \
    --address=127.0.0.1 \
    --service-cluster-ip-range=10.254.0.0/16 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/k8s/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/k8s/kubernetes/ssl/ca-key.pem \
    --root-ca-file=/k8s/kubernetes/ssl/ca.pem \
    --service-account-private-key-file=/k8s/kubernetes/ssl/ca-key.pem"

    Create the kube-controller-manager systemd unit file
    vim /usr/lib/systemd/system/kube-controller-manager.service

    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=-/k8s/kubernetes/cfg/kube-controller-manager
    ExecStart=/k8s/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target

    Start the service
    systemctl daemon-reload
    systemctl enable kube-controller-manager
    systemctl start kube-controller-manager
    [root@elasticsearch01 bin]# systemctl status kube-controller-manager
    ● kube-controller-manager.service - Kubernetes Controller Manager
    Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
    Active: active (running) since Thu 2018-12-27 15:19:19 CST; 11s ago
    Docs: https://github.com/kubernetes/kubernetes
    Main PID: 29510 (kube-controller)
    CGroup: /system.slice/kube-controller-manager.service
    └─29510 /k8s/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=tru..

    5) Verify the master services
    Set the PATH environment variable
    vim /etc/profile
    PATH=/k8s/kubernetes/bin:$PATH
    source /etc/profile

    Check the master component status
    kubectl get cs,nodes
    [root@k8s-master ssl]# kubectl get cs,nodes
    NAME STATUS MESSAGE ERROR
    componentstatus/scheduler Healthy ok
    componentstatus/controller-manager Healthy ok
    componentstatus/etcd-0 Healthy {"health":"true"}
    componentstatus/etcd-1 Healthy {"health":"true"}
    componentstatus/etcd-2 Healthy {"health":"true"}

    4.0 Node deployment
    4.1 Install Docker on the node hosts:
    # Add the Docker yum repository on all nodes (using the Aliyun mirror)
    sudo yum install -y yum-utils device-mapper-persistent-data lvm2
    sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

    # List the available Docker versions:
    yum list docker-ce.x86_64 --showduplicates |sort -r

    # Install Docker 18.06.1 on each node:
    # Refresh the yum cache
    sudo yum makecache fast
    sudo yum install -y --setopt=obsoletes=0 \
    docker-ce-18.06.1.ce-3.el7

    # Start Docker and enable it at boot
    sudo systemctl start docker
    sudo systemctl enable docker
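
    A quick check that the expected Docker version is running:
    docker version --format '{{.Server.Version}}'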

    4.2 Deploy the kubelet component
    1) Install the binaries
    wget https://dl.k8s.io/v1.13.1/kubernetes-node-linux-amd64.tar.gz
    tar zxvf kubernetes-node-linux-amd64.tar.gz
    cd kubernetes/node/bin/
    cp kube-proxy kubelet kubectl /k8s/kubernetes/bin/

    2) Copy the relevant certificates to the node hosts (run from /k8s/kubernetes/ssl on the master):
    scp *.pem 172.16.14.112:/k8s/kubernetes/ssl
    scp *.pem 172.16.14.115:/k8s/kubernetes/ssl

    3) Create the kubelet bootstrap kubeconfig files via a script
    vim /k8s/kubernetes/cfg/environment.sh
    #!/bin/bash
    # Create the kubelet bootstrapping kubeconfig
    # Note: BOOTSTRAP_TOKEN must match the token in /k8s/kubernetes/cfg/token.csv
    BOOTSTRAP_TOKEN=f2c50331f07be89278acdaf341ff1ecc
    KUBE_APISERVER="https://172.16.14.110:6443"
    # Set cluster parameters
    kubectl config set-cluster kubernetes \
    --certificate-authority=/k8s/kubernetes/ssl/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=bootstrap.kubeconfig

    # Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=bootstrap.kubeconfig

    # Set context parameters
    kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=bootstrap.kubeconfig

    # Set the default context
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

    #----------------------

    # Create the kube-proxy kubeconfig

    kubectl config set-cluster kubernetes \
    --certificate-authority=/k8s/kubernetes/ssl/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-credentials kube-proxy \
    --client-certificate=/k8s/kubernetes/ssl/kube-proxy.pem \
    --client-key=/k8s/kubernetes/ssl/kube-proxy-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-context default \
    --cluster=kubernetes \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig

    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

    Run the script
    [root@elasticsearch02 cfg]# sh environment.sh
    Cluster "kubernetes" set.
    User "kubelet-bootstrap" set.
    Context "default" created.
    Switched to context "default".
    Cluster "kubernetes" set.
    User "kube-proxy" set.
    Context "default" created.
    Switched to context "default".
    [root@elasticsearch02 cfg]# ls
    bootstrap.kubeconfig environment.sh kube-proxy.kubeconfig
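
    Both files must end up under /k8s/kubernetes/cfg/ on every node; if the script was run on one node only, copy them to the other (IP per the host table above):
    scp bootstrap.kubeconfig kube-proxy.kubeconfig 172.16.14.115:/k8s/kubernetes/cfg/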

    4) Create the kubelet parameter configuration template
    vim /k8s/kubernetes/cfg/kubelet.config
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 172.16.14.112
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.254.0.10"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true

    5) Create the kubelet configuration file
    vim /k8s/kubernetes/cfg/kubelet

    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=172.16.14.112 \
    --kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/k8s/kubernetes/cfg/kubelet.config \
    --cert-dir=/k8s/kubernetes/ssl \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
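
    The address/hostname-override values above are for k8s-node1; on the second node the same files need its own IP. A convenience one-liner (assuming identical paths on both nodes):
    sed -i 's/172.16.14.112/172.16.14.115/g' /k8s/kubernetes/cfg/kubelet /k8s/kubernetes/cfg/kubelet.config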

    6) Create the kubelet systemd unit file
    vim /usr/lib/systemd/system/kubelet.service

    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service

    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kubelet
    ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process

    [Install]
    WantedBy=multi-user.target

    7) Bind the kubelet-bootstrap user to the system cluster role
    kubectl create clusterrolebinding kubelet-bootstrap \
    --clusterrole=system:node-bootstrapper \
    --user=kubelet-bootstrap
    Note: kubectl talks to localhost:8080 by default, so run this on the master
    [root@elasticsearch01 ssl]# kubectl create clusterrolebinding kubelet-bootstrap \
    > --clusterrole=system:node-bootstrapper \
    > --user=kubelet-bootstrap
    clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

    8) Start the service:
    systemctl daemon-reload
    systemctl enable kubelet
    systemctl start kubelet
    [root@elasticsearch02 cfg]# systemctl status kubelet
    ● kubelet.service - Kubernetes Kubelet
    Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
    Active: active (running) since Thu 2018-12-27 17:34:30 CST; 18s ago
    Main PID: 24676 (kubelet)
    Memory: 88.6M
    CGroup: /system.slice/kubelet.service
    └─24676 /k8s/kubernetes/bin/kubelet --logtostderr=true --v=4 --hostname-override=10.2.8.44 --kubeconfig=/k8s/kubernetes...

    9) Accept the kubelet CSR request on the master. CSRs can be approved manually or automatically; the automatic approach is recommended because, since v1.8, the certificates generated after CSR approval can be rotated automatically. The manual approval procedure is shown below. List the CSRs:
    [root@elasticsearch01 ssl]# kubectl get csr
    NAME AGE REQUESTOR CONDITION
    node-csr-ij3py9j-yi-eoa8sOHMDs7VeTQtMv0N3Efj3ByZLMdc 102s kubelet-bootstrap Pending
    Approve the node's CSR
    [root@elasticsearch01 ssl]# kubectl certificate approve node-csr-ij3py9j-yi-eoa8sOHMDs7VeTQtMv0N3Efj3ByZLMdc
    certificatesigningrequest.certificates.k8s.io/node-csr-ij3py9j-yi-eoa8sOHMDs7VeTQtMv0N3Efj3ByZLMdc approved
    Check the CSR list again
    [root@elasticsearch01 ssl]# kubectl get csr
    NAME AGE REQUESTOR CONDITION
    node-csr-ij3py9j-yi-eoa8sOHMDs7VeTQtMv0N3Efj3ByZLMdc 5m13s kubelet-bootstrap Approved,Issued
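
    For the automatic approach mentioned above, one common pattern (a sketch, not part of the original walkthrough) is to let the bootstrap user auto-approve initial node client CSRs via the built-in clusterrole:
    kubectl create clusterrolebinding node-client-auto-approve-csr \
    --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
    --user=kubelet-bootstrap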

    4.3 Deploy the kube-proxy component
    kube-proxy runs on every node; it watches the apiserver for changes to Services and Endpoints and programs routing rules to load-balance service traffic.
    1) Create the kube-proxy configuration file
    vim /k8s/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=172.16.14.112 \
    --cluster-cidr=10.254.0.0/16 \
    --kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"
    2) Create the kube-proxy systemd unit file
    vim /usr/lib/systemd/system/kube-proxy.service

    [Unit]
    Description=Kubernetes Proxy
    After=network.target

    [Service]
    EnvironmentFile=-/k8s/kubernetes/cfg/kube-proxy
    ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
    3) Start the service:
    systemctl daemon-reload
    systemctl enable kube-proxy
    systemctl start kube-proxy
    [root@elasticsearch02 cfg]# systemctl status kube-proxy
    ● kube-proxy.service - Kubernetes Proxy
    Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
    Active: active (running) since Thu 2018-12-27 18:31:42 CST; 11s ago
    Main PID: 5376 (kube-proxy)
    Memory: 40.9M
    CGroup: /system.slice/kube-proxy.service
    ‣ 5376 /k8s/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=10.2.8.44 --cluster-cidr=10.254.0.0/...
    4) Check the cluster state
    [root@elasticsearch01 cfg]# kubectl get nodes
    NAME STATUS ROLES AGE VERSION
    10.2.8.65 Ready <none> 9m15s v1.13.1
    5) Deploy the second node (172.16.14.115 here) the same way and approve its CSR; the kubelet-client certificate is generated after approval.
    Note: if kubelet or kube-proxy is misconfigured in the meantime (e.g. a wrong listen IP or hostname causing a "node not found" error), delete the kubelet-client certificate, restart the kubelet service, and approve the newly issued CSR again.
    [root@elasticsearch03 kubernetes]# ls ssl
    ca-key.pem kubelet-client-2018-12-27-20-13-52.pem kubelet.crt kube-proxy-key.pem server-key.pem
    ca.pem kubelet-client-current.pem kubelet.key kube-proxy.pem server.pem

    [root@k8s-master ssl]# kubectl get nodes
    NAME STATUS ROLES AGE VERSION
    172.16.14.112 Ready <none> 5h10m v1.13.1
    172.16.14.115 Ready <none> 5h10m v1.13.1

    5. Flanneld network deployment
    5.1 Register the pod network segment in etcd
    (Note: this guide registers 10.254.0.0/16, which overlaps the --service-cluster-ip-range configured earlier; in a production setup the pod and service CIDRs should be distinct.)
    /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://172.16.14.110:2379,https://172.16.14.112:2379,https://172.16.14.115:2379" set /k8s/network/config '{ "Network": "10.254.0.0/16", "Backend": {"Type": "vxlan"}}'
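
    To confirm the key was written:
    /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem --cert-file=/k8s/etcd/ssl/server.pem --key-file=/k8s/etcd/ssl/server-key.pem --endpoints="https://172.16.14.110:2379" get /k8s/network/config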

    5.2 Install flannel
    1) Unpack and install
    tar -xvf flannel-v0.10.0-linux-amd64.tar.gz
    mv flanneld mk-docker-opts.sh /k8s/kubernetes/bin/

    2) Configure flanneld
    vim /k8s/kubernetes/cfg/flanneld
    FLANNEL_OPTIONS="--etcd-endpoints=https://172.16.14.110:2379,https://172.16.14.112:2379,https://172.16.14.115:2379 -etcd-cafile=/k8s/etcd/ssl/ca.pem -etcd-certfile=/k8s/etcd/ssl/server.pem -etcd-keyfile=/k8s/etcd/ssl/server-key.pem -etcd-prefix=/k8s/network"

    Create the flanneld systemd unit file
    vim /usr/lib/systemd/system/flanneld.service
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service

    [Service]
    Type=notify
    EnvironmentFile=/k8s/kubernetes/cfg/flanneld
    ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
    ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target

    3) Configure Docker to start on the flannel-assigned subnet. Just set EnvironmentFile=/run/flannel/subnet.env and ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS:
    vim /usr/lib/systemd/system/docker.service
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    After=network-online.target firewalld.service
    Wants=network-online.target

    [Service]
    Type=notify
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
    ExecReload=/bin/kill -s HUP $MAINPID
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    TimeoutStartSec=0
    Delegate=yes
    KillMode=process
    Restart=on-failure
    StartLimitBurst=3
    StartLimitInterval=60s

    [Install]
    WantedBy=multi-user.target
    4) Start the services. Note: stop docker (and the dependent kubelet) before starting flannel, so that flannel can take over the docker0 bridge:
    systemctl daemon-reload
    systemctl stop docker
    systemctl start flanneld
    systemctl enable flanneld
    systemctl start docker
    systemctl restart kubelet
    systemctl restart kube-proxy
    5) Verify the services
    [root@elasticsearch02 bin]# cat /run/flannel/subnet.env
    DOCKER_OPT_BIP="--bip=10.254.35.1/24"
    DOCKER_OPT_IPMASQ="--ip-masq=false"
    DOCKER_OPT_MTU="--mtu=1450"
    DOCKER_NETWORK_OPTIONS=" --bip=10.254.35.1/24 --ip-masq=false --mtu=1450"
    [root@elasticsearch02 bin]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 52:54:00:a4:ca:ff brd ff:ff:ff:ff:ff:ff
    inet 10.2.8.65/24 brd 10.2.8.255 scope global eth0
    valid_lft forever preferred_lft forever
    3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
    link/ether 02:42:06:0a:ab:32 brd ff:ff:ff:ff:ff:ff
    inet 10.254.35.1/24 brd 10.254.35.255 scope global docker0
    valid_lft forever preferred_lft forever
    4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN
    link/ether 72:59:dc:2b:0a:21 brd ff:ff:ff:ff:ff:ff
    inet 10.254.35.0/32 scope global flannel.1
    valid_lft forever preferred_lft forever

    [root@k8s-master ssl]# kubectl get nodes
    NAME STATUS ROLES AGE VERSION
    172.16.14.112 Ready <none> 6h1m v1.13.1
    172.16.14.115 Ready <none> 6h1m v1.13.1
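
    As a final check that the overlay actually routes traffic between nodes, a throwaway pair of pods can ping each other across hosts (a sketch; image and names are arbitrary):
    kubectl run net-test --image=busybox:1.28 --replicas=2 -- sleep 3600
    kubectl get pods -o wide
    # from the pod on one node, ping the IP of the pod on the other node
    kubectl exec -it <net-test-pod-name> -- ping -c 3 <other-pod-ip>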

  • Original source: https://www.cnblogs.com/hejianping/p/10917091.html