    Kubernetes Cluster Environment Setup (7)

    I. Kubernetes Cluster Environment Setup

    1.1.1 Install and deploy the kube-proxy service on the node servers

    1. Host plan for the kube-proxy service

    Host        Role          IP
    hdss-21     kube-proxy    10.0.0.21
    hdss-22     kube-proxy    10.0.0.22
    2. Issue the kube-proxy certificate (on hdss-201)
    Create the JSON config file for the certificate signing request (CSR):
    [root@hdss-201.host.com /opt/certs]# vim /opt/certs/kube-proxy-csr.json
    [root@hdss-201.host.com /opt/certs]# cat /opt/certs/kube-proxy-csr.json
    {
        "CN": "system:kube-proxy",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "guizhou",
                "L": "guiyang",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
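
    Why the CN is "system:kube-proxy": the apiserver treats the certificate CN as the user name, and that
    exact name is already covered by Kubernetes' built-in RBAC bootstrap (ClusterRoleBinding
    system:node-proxier), so no extra role bindings are needed. A hedged check, not part of the original
    steps, run from any host whose kubectl already talks to this cluster (e.g. hdss-21):

    # show the default binding that maps the user system:kube-proxy to the system:node-proxier role
    kubectl get clusterrolebinding system:node-proxier -o yaml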
    
    Generate the certificate (the "lacks a hosts field" warning below is expected for a client certificate):
    [root@hdss-201.host.com /opt/certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
    2020/06/15 23:49:42 [INFO] generate received request
    2020/06/15 23:49:42 [INFO] received CSR
    2020/06/15 23:49:42 [INFO] generating key: rsa-2048
    2020/06/15 23:49:42 [INFO] encoded CSR
    2020/06/15 23:49:42 [INFO] signed certificate with serial number 438100298835396628078565727365080699742028203116
    2020/06/15 23:49:42 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@hdss-201.host.com /opt/certs]# ll
    total 100
    -rw-r--r-- 1 root root 1249 Jun 13 21:35 apiserver.csr
    -rw-r--r-- 1 root root  566 Jun 13 21:31 apiserver-csr.json
    -rw------- 1 root root 1679 Jun 13 21:35 apiserver-key.pem
    -rw-r--r-- 1 root root 1598 Jun 13 21:35 apiserver.pem
    -rw-r--r-- 1 root root  840 Jun 12 21:24 ca-config.json
    -rw-r--r-- 1 root root  993 Jun 10 21:49 ca.csr
    -rw-r--r-- 1 root root  345 Jun 10 21:48 ca-csr.json
    -rw------- 1 root root 1675 Jun 10 21:49 ca-key.pem
    -rw-r--r-- 1 root root 1346 Jun 10 21:49 ca.pem
    -rw-r--r-- 1 root root  993 Jun 13 21:23 client.csr
    -rw-r--r-- 1 root root  280 Jun 13 21:22 client-csr.json
    -rw------- 1 root root 1675 Jun 13 21:23 client-key.pem
    -rw-r--r-- 1 root root 1363 Jun 13 21:23 client.pem
    -rw-r--r-- 1 root root 1062 Jun 12 21:33 etcd-peer.csr
    -rw-r--r-- 1 root root  363 Jun 12 21:27 etcd-peer-csr.json
    -rw------- 1 root root 1679 Jun 12 21:33 etcd-peer-key.pem
    -rw-r--r-- 1 root root 1428 Jun 12 21:33 etcd-peer.pem
    -rw-r--r-- 1 root root 1115 Jun 15 21:13 kubelet.csr
    -rw-r--r-- 1 root root  451 Jun 15 21:13 kubelet-csr.json
    -rw------- 1 root root 1675 Jun 15 21:13 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Jun 15 21:13 kubelet.pem
    -rw-r--r-- 1 root root 1005 Jun 15 23:49 kube-proxy-client.csr
    -rw------- 1 root root 1675 Jun 15 23:49 kube-proxy-client-key.pem
    -rw-r--r-- 1 root root 1375 Jun 15 23:49 kube-proxy-client.pem
    -rw-r--r-- 1 root root  267 Jun 15 23:47 kube-proxy-csr.json
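
    Before copying the files to the nodes, it can be worth confirming the certificate really carries the
    expected subject. Either of these works (a hedged sketch, not part of the original transcript):

    # print the subject of the new client certificate; CN should be system:kube-proxy
    openssl x509 -in kube-proxy-client.pem -noout -subject
    # or dump the parsed certificate as JSON with cfssl
    cfssl certinfo -cert kube-proxy-client.pem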
    
    3. Copy the certificate and private key to the nodes
    #21
    [root@hdss-21.host.com /opt/kubernetes/server/bin/certs]# scp hdss-201:/opt/certs/kube-proxy-client-key.pem ./
    root@hdss-201's password: 
    kube-proxy-client-key.pem                                                                          100% 1675     2.1MB/s   00:00    
    [root@hdss-21.host.com /opt/kubernetes/server/bin/certs]# scp hdss-201:/opt/certs/kube-proxy-client.pem ./
    root@hdss-201's password: 
    kube-proxy-client.pem                                                                              100% 1375     1.5MB/s   00:00    
    [root@hdss-21.host.com /opt/kubernetes/server/bin/certs]# ll 
    total 40
    -rw------- 1 root root 1679 Jun 13 21:49 apiserver-key.pem
    -rw-r--r-- 1 root root 1598 Jun 13 21:48 apiserver.pem
    -rw------- 1 root root 1675 Jun 13 21:47 ca-key.pem
    -rw-r--r-- 1 root root 1346 Jun 13 21:46 ca.pem
    -rw------- 1 root root 1675 Jun 13 21:48 client-key.pem
    -rw-r--r-- 1 root root 1363 Jun 13 21:48 client.pem
    -rw------- 1 root root 1675 Jun 15 21:13 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Jun 15 21:13 kubelet.pem
    -rw------- 1 root root 1675 Jun 15 23:52 kube-proxy-client-key.pem
    -rw-r--r-- 1 root root 1375 Jun 15 23:52 kube-proxy-client.pem
    
    #22: run the same two scp commands on hdss-22 (sketched below)
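
    A minimal sketch for hdss-22, assuming the same certs directory already exists there:

    # run on hdss-22
    cd /opt/kubernetes/server/bin/certs
    scp hdss-201:/opt/certs/kube-proxy-client-key.pem ./
    scp hdss-201:/opt/certs/kube-proxy-client.pem ./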
    
    4. Create the kubeconfig file (four steps)
    #21
    1) set-cluster
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config set-cluster myk8s \
    >   --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
    >   --embed-certs=true \
    >   --server=https://10.0.0.10:7443 \
    >   --kubeconfig=kube-proxy.kubeconfig
    Cluster "myk8s" set.
    
    2) set-credentials
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config set-credentials kube-proxy \
    >   --client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
    >   --client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
    >   --embed-certs=true \
    >   --kubeconfig=kube-proxy.kubeconfig
    User "kube-proxy" set.
    
    
    3) set-context
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config set-context myk8s-context \
    >   --cluster=myk8s \
    >   --user=kube-proxy \
    >   --kubeconfig=kube-proxy.kubeconfig
    Context "myk8s-context" created.
    
    
    4) use-context
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
    Switched to context "myk8s-context".
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# ll
    total 24
    -rw-r--r-- 1 root root 2223 Jun 13 22:18 audit.yaml
    -rw-r--r-- 1 root root  258 Jun 15 21:47 k8s-node.yaml
    -rw------- 1 root root 6195 Jun 15 21:40 kubelet.kubeconfig
    -rw------- 1 root root 6215 Jun 16 00:02 kube-proxy.kubeconfig
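
    A quick way to confirm what was written into the kubeconfig, i.e. the embedded CA, the kube-proxy user
    and the active context (a hedged check, not in the original steps):

    # summarise the kubeconfig; embedded certificate data is shown as DATA+OMITTED / REDACTED
    kubectl config view --kubeconfig=kube-proxy.kubeconfig
    # print only the context that use-context activated
    kubectl config current-context --kubeconfig=kube-proxy.kubeconfig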
    
    #22: just copy the kubeconfig over from hdss-21
    [root@hdss-22.host.com /opt/kubernetes/server/bin/config]# scp hdss-21:/opt/kubernetes/server/bin/config/kube-proxy.kubeconfig ./
    root@hdss-21's password: 
    kube-proxy.kubeconfig                                                                              100% 6215     5.8MB/s   00:00    
    [root@hdss-22.host.com /opt/kubernetes/server/bin/config]# ll
    total 20
    -rw-r--r-- 1 root root 2223 Apr 27 13:49 audit.yaml
    -rw------- 1 root root 6195 Jun 15 21:40 kubelet.kubeconfig
    -rw------- 1 root root 6215 Jun 16 00:05 kube-proxy.kubeconfig
    
    
    5. Load the ipvs kernel modules -- the script must be set to run automatically at boot
    [root@hdss-21.host.com ~]# vim ipvs.sh
    [root@hdss-21.host.com ~]# cat ipvs.sh 
    #!/bin/bash
    # Load every ip_vs* module shipped with the running kernel so kube-proxy can run in ipvs mode
    ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
    for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
    do
      # only modprobe modules that modinfo can resolve for this kernel
      /sbin/modinfo -F filename $i &>/dev/null
      if [ $? -eq 0 ];then
        /sbin/modprobe $i
      fi
    done
    
    [root@hdss-21.host.com ~]# chmod +x ipvs.sh 
    [root@hdss-21.host.com ~]# ll
    total 8
    -rw-------. 1 root root 1525 Mar 26 22:43 anaconda-ks.cfg
    -rwxr-xr-x  1 root root  231 Apr 27 13:49 ipvs.sh
    [root@hdss-21.host.com ~]# sh ipvs.sh 
    
    Check that the ipvs modules are loaded:
    [root@hdss-21.host.com ~]# lsmod | grep ip_vs
    ip_vs_wrr              12697  0 
    ip_vs_wlc              12519  0 
    ip_vs_sh               12688  0 
    ip_vs_sed              12519  0 
    ip_vs_rr               12600  0 
    ip_vs_pe_sip           12740  0 
    nf_conntrack_sip       33780  1 ip_vs_pe_sip
    ip_vs_nq               12516  0 
    ip_vs_lc               12516  0 
    ip_vs_lblcr            12922  0 
    ip_vs_lblc             12819  0 
    ip_vs_ftp              13079  0 
    ip_vs_dh               12688  0 
    ip_vs                 145497  24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
    nf_nat                 26583  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
    nf_conntrack          139224  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
    libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
    
    Set the script to run automatically at boot:
    [root@hdss-21.host.com ~]# vim /etc/rc.d/rc.local
    [root@hdss-21.host.com ~]# chmod +x  /etc/rc.d/rc.local 
    [root@hdss-21.host.com ~]# ll /etc/rc.d/rc.local
    -rwxr-xr-x 1 root root 500 Jun 16 00:39 /etc/rc.d/rc.local
    [root@hdss-21.host.com ~]# cat /etc/rc.d/rc.local
    #!/bin/bash
    # THIS FILE IS ADDED FOR COMPATIBILITY PURPOSES
    #
    # It is highly advisable to create own systemd services or udev rules
    # to run scripts during boot instead of using this file.
    #
    # In contrast to previous versions due to parallel execution during boot
    # this script will NOT be run after all other services.
    #
    # Please note that you must run 'chmod +x /etc/rc.d/rc.local' to ensure
    # that this script will be executed during boot.
    
    touch /var/lock/subsys/local
    /usr/bin/sh /root/ipvs.sh 
    
    #22: same operations as above
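
    Side note: /etc/rc.d/rc.local does the job, but on a systemd host the same result can be had with a
    modules-load.d drop-in, which systemd-modules-load.service reads at every boot. A hedged alternative
    sketch (module list trimmed to what ipvs mode plus the nq scheduler used below need):

    # /etc/modules-load.d/ipvs.conf  -- one kernel module name per line
    ip_vs
    ip_vs_rr
    ip_vs_wrr
    ip_vs_sh
    ip_vs_nq
    nf_conntrack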
    
    6. Write the kube-proxy startup script
    #21 
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# vim kube-proxy.sh 
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# cat kube-proxy.sh
    #!/bin/sh
    # --cluster-cidr is the pod network CIDR; --hostname-override must match the node name the kubelet registered;
    # nq ("never queue") is the ipvs scheduler used here
    ./kube-proxy \
      --cluster-cidr 172.7.0.0/16 \
      --hostname-override hdss-21.host.com \
      --proxy-mode=ipvs \
      --ipvs-scheduler=nq \
      --kubeconfig ./config/kube-proxy.kubeconfig
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# chmod +x kube-proxy.sh
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# ll kube-proxy.sh
    -rwxr-xr-x 1 root root 190 Jun 16 00:43 kube-proxy.sh
    
    
    Create the data and log directories:
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# mkdir -p /data/logs/kubernetes/kube-proxy
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# ll /data/logs/kubernetes/kube-proxy
    total 0
    
    #22: same operations as above
    
    

    7. Manage the kube-proxy process in the background with supervisor

    [root@hdss-21.host.com /opt/kubernetes/server/bin]# vim /etc/supervisord.d/kube-proxy.ini
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# cat /etc/supervisord.d/kube-proxy.ini
    [program:kube-proxy-21]
    command=/opt/kubernetes/server/bin/kube-proxy.sh                     ; the program (relative uses PATH, can take args)
    numprocs=1                                                           ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
    autostart=true                                                       ; start at supervisord start (default: true)
    autorestart=true                                                     ; restart at unexpected quit (default: true)
    startsecs=30                                                         ; number of secs prog must stay running (def. 1)
    startretries=3                                                       ; max # of serial start failures (default 3)
    exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                            ; setuid to this UNIX account to run the program
    redirect_stderr=true                                                 ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                          ; emit events on stdout writes (default false)
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# supervisorctl update
    kube-proxy-21: added process group
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# supervisorctl status
    etcd-server-21                   RUNNING   pid 1455, uptime 0:00:00
    kube-apiserver-21                RUNNING   pid 1452, uptime 0:00:00
    kube-controller-manager-21       RUNNING   pid 1224, uptime 0:00:00
    kube-kubelet-21                  RUNNING   pid 2053, uptime 2:17:37
    kube-proxy-21                    RUNNING   pid 24234, uptime 0:35:07
    kube-scheduler-21                RUNNING   pid 1226, uptime 0:00:00
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# netstat -luntp |grep kube-proxy
    tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      24012/./kube-proxy  
    tcp6       0      0 :::10256                :::*                    LISTEN      24012/./kube-proxy  
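
    Two hedged checks (not in the original transcript) that the supervised process is really proxying in
    ipvs mode:

    # kube-proxy reports its active mode on the metrics port listed above (10249); expect "ipvs"
    curl -s 127.0.0.1:10249/proxyMode
    # follow the log file the supervisor ini redirects stdout/stderr into
    tail -n 20 /data/logs/kubernetes/kube-proxy/proxy.stdout.log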
    
    #22: same operations as above
    

    8. Install ipvsadm (install only, there is nothing to start)

    [root@hdss-21.host.com /opt/kubernetes/server/bin]# yum install ipvsadm -y
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.0.1:443 nq
      -> 10.0.0.21:6443               Masq    1      0          0         
      -> 10.0.0.22:6443               Masq    1      0          0      
    [root@hdss-22.host.com /opt/kubernetes/server/bin]# yum install ipvsadm -y
    [root@hdss-22.host.com /opt/kubernetes/server/bin]# ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.0.1:443 nq
      -> 10.0.0.21:6443               Masq    1      0          0         
      -> 10.0.0.22:6443               Masq    1      0          0   

    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl get svc
    NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
    kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   2d2h
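
    Reading the two outputs together: 192.168.0.1:443 is the ClusterIP of the kubernetes service, and
    kube-proxy has materialised it as an ipvs virtual server using the nq scheduler (from --ipvs-scheduler),
    with the two apiservers 10.0.0.21:6443 and 10.0.0.22:6443 as real servers. To query just that one
    virtual service (a hedged example):

    # list only the virtual server that backs the kubernetes ClusterIP
    ipvsadm -Ln -t 192.168.0.1:443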
    
    

    II. Verify the Kubernetes Cluster

    2.1.1 Verify the Kubernetes cluster

    1. On any one of the compute nodes, create a resource manifest
    #21
    [root@hdss-21.host.com ~]# vim /root/nginx-ds.yaml
    [root@hdss-21.host.com ~]# cat /root/nginx-ds.yaml
    apiVersion: extensions/v1beta1
    kind: DaemonSet
    metadata:
      name: nginx-ds
    spec:
      template:
        metadata:
          labels:
            app: nginx-ds
        spec:
          containers:
          - name: my-nginx
            image: harbor.od.com/public/nginx:v1.7.9
            ports:
            - containerPort: 80
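
    Side note: this manifest uses the extensions/v1beta1 API group, which the cluster here evidently still
    accepts (the create succeeds below) but which was removed for DaemonSet in Kubernetes 1.16. On newer
    clusters the same workload needs apps/v1 plus an explicit selector; a hedged equivalent:

    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: nginx-ds
    spec:
      selector:
        matchLabels:
          app: nginx-ds
      template:
        metadata:
          labels:
            app: nginx-ds
        spec:
          containers:
          - name: my-nginx
            image: harbor.od.com/public/nginx:v1.7.9
            ports:
            - containerPort: 80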
    
    [root@hdss-21.host.com ~]# kubectl create -f nginx-ds.yaml 
    daemonset.extensions/nginx-ds created
    
    [root@hdss-21.host.com ~]# kubectl get pods
    NAME             READY   STATUS    RESTARTS   AGE
    nginx-ds-8cv76   1/1     Running   0          20h
    nginx-ds-gt26s   1/1     Running   0          20h
    
    [root@hdss-21.host.com ~]# kubectl get pods -o wide
    NAME             READY   STATUS    RESTARTS   AGE   IP          NODE               NOMINATED NODE   READINESS GATES
    nginx-ds-8cv76   1/1     Running   0          20h   10.0.21.2   hdss-21.host.com   <none>           <none>
    nginx-ds-gt26s   1/1     Running   0          20h   10.0.22.2   hdss-22.host.com   <none>           <none>
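
    One running nginx pod per node means scheduling, both kubelets and the image pull from harbor.od.com all
    work. As a final hedged check, curl the pod that lives on the local node (reaching a pod on the other
    node additionally needs the CNI/flannel piece if it is not installed yet):

    # on hdss-21, the locally scheduled pod is 10.0.21.2 per the output above; expect nginx to answer
    curl -I 10.0.21.2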
    