• Kubernetes binary deployment


    Download the Kubernetes server binaries

    [root@hdss7-21 ~]# cd /opt/src
    [root@hdss7-21 src]# wget https://dl.k8s.io/v1.15.2/kubernetes-server-linux-amd64.tar.gz
    
    [root@hdss7-21 src]# tar -xf kubernetes-server-linux-amd64.tar.gz 
    [root@hdss7-21 src]# mv kubernetes /opt/release/kubernetes-v1.15.2
    [root@hdss7-21 src]# ln -s /opt/release/kubernetes-v1.15.2 /opt/apps/kubernetes
    [root@hdss7-21 src]# ll /opt/apps/kubernetes
    lrwxrwxrwx 1 root root 31 Jan 6 12:59 /opt/apps/kubernetes -> /opt/release/kubernetes-v1.15.2
    
    [root@hdss7-21 src]# cd /opt/apps/kubernetes
    [root@hdss7-21 kubernetes]# rm -f kubernetes-src.tar.gz 
    [root@hdss7-21 kubernetes]# cd server/bin/
    [root@hdss7-21 bin]# rm -f *.tar *_tag # *.tar and *_tag are container image files, not needed for a binary install
    [root@hdss7-21 bin]# ll
    total 884636
    -rwxr-xr-x 1 root root 43534816 Aug 5 18:01 apiextensions-apiserver
    -rwxr-xr-x 1 root root 100548640 Aug 5 18:01 cloud-controller-manager
    -rwxr-xr-x 1 root root 200648416 Aug 5 18:01 hyperkube
    -rwxr-xr-x 1 root root 40182208 Aug 5 18:01 kubeadm
    -rwxr-xr-x 1 root root 164501920 Aug 5 18:01 kube-apiserver
    -rwxr-xr-x 1 root root 116397088 Aug 5 18:01 kube-controller-manager
    -rwxr-xr-x 1 root root 42985504 Aug 5 18:01 kubectl
    -rwxr-xr-x 1 root root 119616640 Aug 5 18:01 kubelet
    -rwxr-xr-x 1 root root 36987488 Aug 5 18:01 kube-proxy
    -rwxr-xr-x 1 root root 38786144 Aug 5 18:01 kube-scheduler
    -rwxr-xr-x 1 root root 1648224 Aug 5 18:01 mounter
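    A quick sanity check, not in the original listing, that the unpacked binaries are the expected release:

    ./kube-apiserver --version            # should print: Kubernetes v1.15.2
    ./kubectl version --client --short    # should print: Client Version: v1.15.2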

    Distribute certificates to the master nodes

    [root@hdss7-200 certs]# for i in 21 22;do echo hdss7-$i;ssh hdss7-$i "mkdir /opt/apps/kubernetes/server/bin/certs";scp apiserver-key.pem apiserver.pem ca-key.pem ca.pem client-key.pem client.pem hdss7-$i:/opt/apps/kubernetes/server/bin/certs/;done
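    A hedged verification sketch (not in the original listing): confirm all six certificate and key files landed on both nodes before starting the apiserver.

    for i in 21 22; do
        echo hdss7-$i
        ssh hdss7-$i "ls /opt/apps/kubernetes/server/bin/certs"
    done
    # expect on each node: apiserver-key.pem apiserver.pem ca-key.pem ca.pem client-key.pem client.pem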

    Configure apiserver audit logging

    [root@hdss7-21 bin]# mkdir /opt/apps/kubernetes/conf
    [root@hdss7-21 bin]# vim /opt/apps/kubernetes/conf/audit.yaml # after opening the file, run :set paste so vim's auto-indent does not mangle the YAML
    apiVersion: audit.k8s.io/v1beta1 # This is required.
    kind: Policy
    # Don't generate audit events for all requests in RequestReceived stage.
    omitStages:
      - "RequestReceived"
    rules:
      # Log pod changes at RequestResponse level
      - level: RequestResponse
        resources:
        - group: ""
          # Resource "pods" doesn't match requests to any subresource of pods,
          # which is consistent with the RBAC policy.
          resources: ["pods"]
      # Log "pods/log", "pods/status" at Metadata level
      - level: Metadata
        resources:
        - group: ""
          resources: ["pods/log", "pods/status"]

      # Don't log requests to a configmap called "controller-leader"
      - level: None
        resources:
        - group: ""
          resources: ["configmaps"]
          resourceNames: ["controller-leader"]

      # Don't log watch requests by the "system:kube-proxy" on endpoints or services
      - level: None
        users: ["system:kube-proxy"]
        verbs: ["watch"]
        resources:
        - group: "" # core API group
          resources: ["endpoints", "services"]

      # Don't log authenticated requests to certain non-resource URL paths.
      - level: None
        userGroups: ["system:authenticated"]
        nonResourceURLs:
        - "/api*" # Wildcard matching.
        - "/version"

      # Log the request body of configmap changes in kube-system.
      - level: Request
        resources:
        - group: "" # core API group
          resources: ["configmaps"]
        # This rule only applies to resources in the "kube-system" namespace.
        # The empty string "" can be used to select non-namespaced resources.
        namespaces: ["kube-system"]

      # Log configmap and secret changes in all other namespaces at the Metadata level.
      - level: Metadata
        resources:
        - group: "" # core API group
          resources: ["secrets", "configmaps"]

      # Log all other resources in core and extensions at the Request level.
      - level: Request
        resources:
        - group: "" # core API group
        - group: "extensions" # Version of group should NOT be included.

      # A catch-all rule to log all other requests at the Metadata level.
      - level: Metadata
        # Long-running requests like watches that fall under this rule will not
        # generate an audit event in RequestReceived.
        omitStages:
          - "RequestReceived"

    Create the startup script

    [root@hdss7-21 bin]# vim /opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
    #!/bin/bash
    
    WORK_DIR=$(dirname $(readlink -f $0))
    [ $? -eq 0 ] && cd $WORK_DIR || exit
    
    /opt/apps/kubernetes/server/bin/kube-apiserver \
        --apiserver-count 2 \
        --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
        --audit-policy-file ../../conf/audit.yaml \
        --authorization-mode RBAC \
        --client-ca-file ./certs/ca.pem \
        --requestheader-client-ca-file ./certs/ca.pem \
        --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
        --etcd-cafile ./certs/ca.pem \
        --etcd-certfile ./certs/client.pem \
        --etcd-keyfile ./certs/client-key.pem \
        --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
        --service-account-key-file ./certs/ca-key.pem \
        --service-cluster-ip-range 192.168.0.0/16 \
        --service-node-port-range 3000-29999 \
        --target-ram-mb=1024 \
        --kubelet-client-certificate ./certs/client.pem \
        --kubelet-client-key ./certs/client-key.pem \
        --log-dir /data/logs/kubernetes/kube-apiserver \
        --tls-cert-file ./certs/apiserver.pem \
        --tls-private-key-file ./certs/apiserver-key.pem \
        --v 2
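    The listing does not show making this script executable or creating the directories that the flags above and the supervisor config below write to; a hedged sketch of those prerequisites:

    chmod u+x /opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
    mkdir -p /data/logs/kubernetes/kube-apiserver   # used by --audit-log-path, --log-dir and the supervisor stdout log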

    Configure supervisor

    [root@hdss7-21 bin]# vim /etc/supervisord.d/kube-apiserver.ini
    [program:kube-apiserver-7-21]
    command=/opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
    numprocs=1
    directory=/opt/apps/kubernetes/server/bin
    autostart=true
    autorestart=true
    startsecs=30
    startretries=3
    exitcodes=0,2
    stopsignal=QUIT
    stopwaitsecs=10
    user=root
    redirect_stderr=true
    stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log
    stdout_logfile_maxbytes=64MB
    stdout_logfile_backups=5
    stdout_capture_maxbytes=1MB
    stdout_events_enabled=false
    [root@hdss7-21 bin]# supervisorctl update
    [root@hdss7-21 bin]# supervisorctl status
    etcd-server-7-21 RUNNING pid 23637, uptime 22:26:08
    kube-apiserver-7-21 RUNNING pid 32591, uptime 0:05:37

    Starting and stopping the apiserver

    [root@hdss7-21 ~]# supervisorctl start kube-apiserver-7-21
    [root@hdss7-21 ~]# supervisorctl stop kube-apiserver-7-21
    [root@hdss7-21 ~]# supervisorctl restart kube-apiserver-7-21
    [root@hdss7-21 ~]# supervisorctl status kube-apiserver-7-21

    Check the process and listening ports

    [root@hdss7-21 bin]# netstat -lntp|grep api
    tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 32595/kube-apiserve 
    tcp6 0 0 :::6443 :::* LISTEN 32595/kube-apiserve 
    [root@hdss7-21 bin]# ps uax|grep kube-apiserver|grep -v grep
    root 32591 0.0 0.0 115296 1476 ? S 20:17 0:00 /bin/bash /opt/apps/kubernetes/server/bin/kube-apiserver-startup.sh
    root 32595 3.0 2.3 402720 184892 ? Sl 20:17 0:16 /opt/apps/kubernetes/server/bin/kube-apiserver --apiserver-count 2 --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log --audit-policy-file ../../conf/audit.yaml --authorization-mode RBAC --client-ca-file ./certs/ca.pem --requestheader-client-ca-file ./certs/ca.pem --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota --etcd-cafile ./certs/ca.pem --etcd-certfile ./certs/client.pem --etcd-keyfile ./certs/client-key.pem --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 --service-account-key-file ./certs/ca-key.pem --service-cluster-ip-range 192.168.0.0/16 --service-node-port-range 3000-29999 --target-ram-mb=1024 --kubelet-client-certificate ./certs/client.pem --kubelet-client-key ./certs/client-key.pem --log-dir /data/logs/kubernetes/kube-apiserver --tls-cert-file ./certs/apiserver.pem --tls-private-key-file ./certs/apiserver-key.pem --v 2
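    To confirm the apiserver is actually serving requests and not just running, a hedged probe sketch; the insecure port 127.0.0.1:8080 answers locally, while the TLS port normally expects client certificates:

    curl -s 127.0.0.1:8080/healthz && echo    # expect: ok
    curl -sk https://127.0.0.1:6443/healthz   # may return ok or 401/403 depending on anonymous RBAC defaults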

    Configure an L4 proxy for the apiserver

    [root@hdss7-11 ~]# yum install -y nginx
    [root@hdss7-11 ~]# vim /etc/nginx/nginx.conf 
    # Append the following at the end of the file; the stream block may only appear in the main context
    # This is only a minimal nginx setup; tune it more carefully for real production use
    stream {
        log_format proxy '$time_local|$remote_addr|$upstream_addr|$protocol|$status|'
                         '$session_time|$upstream_connect_time|$bytes_sent|$bytes_received|'
                         '$upstream_bytes_sent|$upstream_bytes_received' ;

        upstream kube-apiserver {
            server 10.4.7.21:6443 max_fails=3 fail_timeout=30s;
            server 10.4.7.22:6443 max_fails=3 fail_timeout=30s;
        }
        server {
            listen 7443;
            proxy_connect_timeout 2s;
            proxy_timeout 900s;
            proxy_pass kube-apiserver;
            access_log /var/log/nginx/proxy.log proxy;
        }
    }
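    Before starting nginx, a syntax check helps catch mistakes in the new stream block (sketch):

    nginx -t    # expect: syntax is ok / test is successful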
    [root@hdss7-11 ~]# systemctl start nginx; systemctl enable nginx
    [root@hdss7-11 ~]# curl 127.0.0.1:7443 # run a few times to see both backends rotate
    Client sent an HTTP request to an HTTPS server.
    [root@hdss7-11 ~]# cat /var/log/nginx/proxy.log 
    06/Jan/2020:21:00:27 +0800|127.0.0.1|10.4.7.21:6443|TCP|200|0.001|0.000|76|78|78|76
    06/Jan/2020:21:05:03 +0800|127.0.0.1|10.4.7.22:6443|TCP|200|0.020|0.019|76|78|78|76
    06/Jan/2020:21:05:04 +0800|127.0.0.1|10.4.7.21:6443|TCP|200|0.001|0.001|76|78|78|76

    Install keepalived

    [root@hdss7-11 ~]# yum install -y keepalived
    [root@hdss7-11 ~]# vim /etc/keepalived/check_port.sh # port-check script used by keepalived
    #!/bin/bash
    if [ $# -eq 1 ] && [[ $1 =~ ^[0-9]+$ ]]; then
        [ $(netstat -lntp | grep ":$1 " | wc -l) -eq 0 ] && echo "[ERROR] nginx may be not running!" && exit 1 || exit 0
    else
        echo "[ERROR] need one port!"
        exit 1
    fi
    [root@hdss7-11 ~]# chmod +x /etc/keepalived/check_port.sh 
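    A quick manual test of the check script (sketch); with nginx already listening on 7443, the first call should exit 0:

    /etc/keepalived/check_port.sh 7443; echo "exit: $?"   # expect: exit: 0
    /etc/keepalived/check_port.sh                         # expect: [ERROR] need one port!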

    Configure the master node (hdss7-11): /etc/keepalived/keepalived.conf

    ! Configuration File for keepalived
    global_defs {
        router_id 10.4.7.11
    }
    vrrp_script chk_nginx {
        script "/etc/keepalived/check_port.sh 7443"
        interval 2
        weight -20
    }
    vrrp_instance VI_1 {
        state MASTER
        interface ens32
        virtual_router_id 251
        priority 100
        advert_int 1
        mcast_src_ip 10.4.7.11
        nopreempt

        authentication {
            auth_type PASS
            auth_pass 11111111
        }
        track_script {
            chk_nginx
        }
        virtual_ipaddress {
            10.4.7.10
        }
    }

    Configure the backup node (hdss7-12): /etc/keepalived/keepalived.conf

    ! Configuration File for keepalived
    global_defs {
        router_id 10.4.7.12
    }
    vrrp_script chk_nginx {
        script "/etc/keepalived/check_port.sh 7443"
        interval 2
        weight -20
    }
    vrrp_instance VI_1 {
        state BACKUP
        interface ens32
        virtual_router_id 251
        mcast_src_ip 10.4.7.12
        priority 90
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 11111111
        }
        track_script {
            chk_nginx
        }
        virtual_ipaddress {
            10.4.7.10
        }
    }
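    Note that /etc/keepalived/check_port.sh must also exist and be executable on the backup node; the listing only creates it on hdss7-11. A hedged sketch of copying it over:

    scp /etc/keepalived/check_port.sh hdss7-12:/etc/keepalived/
    ssh hdss7-12 "chmod +x /etc/keepalived/check_port.sh"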

    Start keepalived

    [root@hdss7-11 ~]# systemctl start keepalived ; systemctl enable keepalived
    [root@hdss7-11 ~]# ip addr show ens32
    2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:6d:b8:82 brd ff:ff:ff:ff:ff:ff
    inet 10.4.7.11/24 brd 10.4.7.255 scope global noprefixroute ens32
    valid_lft forever preferred_lft forever
    inet 10.4.7.10/32 scope global ens32
    valid_lft forever preferred_lft forever
    ......
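    To confirm that failover actually works, a hedged test sketch: stop nginx on the master and watch the VIP move to the backup. Because nopreempt is set, the VIP may not move back automatically after nginx is restored.

    # on hdss7-11
    systemctl stop nginx
    ip addr show ens32 | grep 10.4.7.10    # the VIP should disappear from this node
    # on hdss7-12
    ip addr show ens32 | grep 10.4.7.10    # the VIP should now be held here
    # restore afterwards on hdss7-11
    systemctl start nginx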

    Install kube-controller-manager

    Create the startup script

    [root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kube-controller-manager-startup.sh
    #!/bin/sh
    WORK_DIR=$(dirname $(readlink -f $0))
    [ $? -eq 0 ] && cd $WORK_DIR || exit
    
    /opt/apps/kubernetes/server/bin/kube-controller-manager \
        --cluster-cidr 172.7.0.0/16 \
        --leader-elect true \
        --log-dir /data/logs/kubernetes/kube-controller-manager \
        --master http://127.0.0.1:8080 \
        --service-account-private-key-file ./certs/ca-key.pem \
        --service-cluster-ip-range 192.168.0.0/16 \
        --root-ca-file ./certs/ca.pem \
        --v 2
    [root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kube-controller-manager-startup.sh
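    As with the apiserver, the log directory that the supervisor config below writes to is not shown being created; a hedged sketch:

    mkdir -p /data/logs/kubernetes/kube-controller-manager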

    Configure supervisor

    [root@hdss7-21 ~]# vim /etc/supervisord.d/kube-controller-manager.ini
    [program:kube-controller-manager-7-21]
    command=/opt/apps/kubernetes/server/bin/kube-controller-manager-startup.sh ; the program (relative uses PATH, can take args)
    numprocs=1 ; number of processes copies to start (def 1)
    directory=/opt/apps/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
    autostart=true ; start at supervisord start (default: true)
    autorestart=true ; restart at unexpected quit (default: true)
    startsecs=30 ; number of secs prog must stay running (def. 1)
    startretries=3 ; max # of serial start failures (default 3)
    exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT ; signal used to kill process (default TERM)
    stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
    user=root ; setuid to this UNIX account to run the program
    redirect_stderr=true ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false ; emit events on stdout writes (default false)
    
    [root@hdss7-21 ~]# supervisorctl update
    kube-controller-manager-7-21: stopped
    kube-controller-manager-7-21: updated process group
    [root@hdss7-21 ~]# supervisorctl status
    etcd-server-7-21 RUNNING pid 23637, uptime 1 day, 0:16:54
    kube-apiserver-7-21 RUNNING pid 32591, uptime 1:56:23
    kube-controller-manager-7-21 RUNNING pid 33357, uptime 0:00:38

    Install kube-scheduler
    Create the startup script

    [root@hdss7-21 ~]# vim /opt/apps/kubernetes/server/bin/kube-scheduler-startup.sh
    #!/bin/sh
    WORK_DIR=$(dirname $(readlink -f $0))
    [ $? -eq 0 ] && cd $WORK_DIR || exit
    
    /opt/apps/kubernetes/server/bin/kube-scheduler \
        --leader-elect \
        --log-dir /data/logs/kubernetes/kube-scheduler \
        --master http://127.0.0.1:8080 \
        --v 2
    [root@hdss7-21 ~]# chmod u+x /opt/apps/kubernetes/server/bin/kube-scheduler-startup.sh
    [root@hdss7-21 ~]# mkdir -p /data/logs/kubernetes/kube-scheduler

    Configure supervisor

    [root@hdss7-21 ~]# vim /etc/supervisord.d/kube-scheduler.ini
    [program:kube-scheduler-7-21]
    command=/opt/apps/kubernetes/server/bin/kube-scheduler-startup.sh 
    numprocs=1 
    directory=/opt/apps/kubernetes/server/bin 
    autostart=true 
    autorestart=true 
    startsecs=30 
    startretries=3 
    exitcodes=0,2 
    stopsignal=QUIT 
    stopwaitsecs=10 
    user=root 
    redirect_stderr=true 
    stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log 
    stdout_logfile_maxbytes=64MB 
    stdout_logfile_backups=4 
    stdout_capture_maxbytes=1MB 
    stdout_events_enabled=false
    
    [root@hdss7-21 ~]# supervisorctl update
    kube-scheduler-7-21: stopped
    kube-scheduler-7-21: updated process group
    [root@hdss7-21 ~]# supervisorctl status 
    etcd-server-7-21 RUNNING pid 23637, uptime 1 day, 0:26:53
    kube-apiserver-7-21 RUNNING pid 32591, uptime 2:06:22
    kube-controller-manager-7-21 RUNNING pid 33357, uptime 0:10:37
    kube-scheduler-7-21 RUNNING pid 33450, uptime 0:01:18 

    Check the status of the master components

    [root@hdss7-21 ~]# ln -s /opt/apps/kubernetes/server/bin/kubectl /usr/local/bin/
    [root@hdss7-21 ~]# kubectl get cs
    NAME STATUS MESSAGE ERROR
    scheduler Healthy ok 
    controller-manager Healthy ok 
    etcd-1 Healthy {"health": "true"} 
    etcd-0 Healthy {"health": "true"} 
    etcd-2 Healthy {"health": "true"}
    
    [root@hdss7-22 ~]# ln -s /opt/apps/kubernetes/server/bin/kubectl /usr/local/bin/
    [root@hdss7-22 ~]# kubectl get cs
    NAME STATUS MESSAGE ERROR
    controller-manager Healthy ok 
    scheduler Healthy ok 
    etcd-2 Healthy {"health": "true"} 
    etcd-1 Healthy {"health": "true"} 
    etcd-0 Healthy {"health": "true"}
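    With both masters reporting healthy, it is also worth checking the full chain (keepalived VIP -> nginx L4 proxy -> apiserver) end to end. A hedged sketch from any host that can reach the VIP; the exact response depends on the cluster's anonymous-access RBAC defaults:

    curl -k https://10.4.7.10:7443/version
    # expected: a JSON version payload reporting v1.15.2, or 401/403 if anonymous access to /version is restricted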