• 5 Setting up the apiserver


    Install the apiserver

    Install and deploy the control-plane service -- apiserver

    Deploy the kube-apiserver cluster
    Cluster plan
    
    Hostname			Role			IP
    rstx-203.rongbiz.cn	kube-apiserver		192.168.1.203
    rstx-204.rongbiz.cn	kube-apiserver		192.168.1.204
    rstx-201.rongbiz.cn	L4 load balancer	192.168.1.201
    rstx-202.rongbiz.cn	L4 load balancer	192.168.1.202
    
    Note: 192.168.1.201 and 192.168.1.202 run nginx as a Layer 4 load balancer, with keepalived providing a VIP, 192.168.1.200, that proxies the two kube-apiservers for high availability (the keepalived configs and the apiserver certificate below both use 192.168.1.200)
    
    This walkthrough uses the rstx-203 host as the example; the other apiserver node is deployed the same way
    
    
    
    Download the package, extract it, and create a symlink; install v1.15.2
    [root@rstx-203 ~]# cd /opt/src/
    [root@rstx-203 src]# ls
    kubernetes-server-linux-amd64-v1.15.2.tar.gz
    
    Check the file size
    [root@rstx-203 src]# du -sh kubernetes-server-linux-amd64-v1.15.2.tar.gz
    
    [root@rstx-203 src]# tar xf kubernetes-server-linux-amd64-v1.15.2.tar.gz -C /opt
    [root@rstx-203 src]# cd ..
    [root@rstx-203 opt]# mv kubernetes/ kubernetes-v1.15.2
    
    Create a symlink so future upgrades are easier (an upgrade sketch follows the listing below)
    [root@rstx-203 opt]# ln -s /opt/kubernetes-v1.15.2/ /opt/kubernetes
    [root@rstx-203 opt]# ll
    total 0
    drwx--x--x 4 root root  28 Dec 10 14:28 containerd
    lrwxrwxrwx 1 root root  17 Dec 10 16:45 etcd -> /opt/etcd-v3.1.20
    drwxr-xr-x 4 etcd etcd 166 Dec 10 17:43 etcd-v3.1.20
    lrwxrwxrwx 1 root root  24 Dec 10 18:33 kubernetes -> /opt/kubernetes-v1.15.2/
    drwxr-xr-x 4 root root  79 Aug  5 18:01 kubernetes-v1.15.2
    drwxr-xr-x 2 root root  97 Dec 10 18:29 src
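    A minimal upgrade sketch (the v1.16.0 version number is purely illustrative): extract the new release into a scratch directory, then atomically repoint the symlink.

    [root@rstx-203 src]# mkdir /opt/tmp && tar xf kubernetes-server-linux-amd64-v1.16.0.tar.gz -C /opt/tmp
    [root@rstx-203 src]# mv /opt/tmp/kubernetes /opt/kubernetes-v1.16.0
    [root@rstx-203 src]# ln -sfn /opt/kubernetes-v1.16.0 /opt/kubernetes    # -n repoints the existing symlink in place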
    
    [root@rstx-203 opt]# cd kubernetes
    [root@rstx-203 kubernetes]# ls
    addons  kubernetes-src.tar.gz  LICENSES  server
    
    Delete the source tarball
    [root@rstx-203 kubernetes]# rm -rf kubernetes-src.tar.gz 
    
    [root@rstx-203 kubernetes]# cd server/bin/
    
    Delete the unneeded files (Docker image tarballs and tag files)
    [root@rstx-203 bin]# rm -rf *.tar
    [root@rstx-203 bin]# rm -rf *_tag 
    
    What remains is the executables
    [root@rstx-203 bin]# ll
    total 884636
    -rwxr-xr-x 1 root root  43534816 Aug  5 18:01 apiextensions-apiserver
    -rwxr-xr-x 1 root root 100548640 Aug  5 18:01 cloud-controller-manager
    -rwxr-xr-x 1 root root 200648416 Aug  5 18:01 hyperkube
    -rwxr-xr-x 1 root root  40182208 Aug  5 18:01 kubeadm
    -rwxr-xr-x 1 root root 164501920 Aug  5 18:01 kube-apiserver
    -rwxr-xr-x 1 root root 116397088 Aug  5 18:01 kube-controller-manager
    -rwxr-xr-x 1 root root  42985504 Aug  5 18:01 kubectl
    -rwxr-xr-x 1 root root 119616640 Aug  5 18:01 kubelet
    -rwxr-xr-x 1 root root  36987488 Aug  5 18:01 kube-proxy
    -rwxr-xr-x 1 root root  38786144 Aug  5 18:01 kube-scheduler
    -rwxr-xr-x 1 root root   1648224 Aug  5 18:01 mounter
    
    
    Issue the apiserver client certificate: the cert apiserver uses to talk to etcd. apiserver is the client, etcd is the server
    On the ops host rstx-53 (192.168.1.53)
    
    Create the JSON config file for the certificate signing request (CSR)	-- the file already exists in this directory; upload and edit it, do not copy-paste
    [root@rstx-53 ~]# cd /opt/certs/
    
    [root@rstx-53 certs]# vi /opt/certs/client-csr.json
    {
        "CN": "k8s-node",
        "hosts": [
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    
    [root@rstx-53 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client
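    A quick sanity check of the issued client cert (optional; assumes openssl is installed):

    [root@rstx-53 certs]# openssl x509 -in client.pem -noout -subject -dates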
    
    
    
    Create the CSR JSON config file for the apiserver server-side certificate -- upload directly. The hosts list must cover every address clients may use to reach the apiserver: 10.254.0.1 is the first IP of the service CIDR (10.254.0.0/16, set below), and 192.168.1.200 is the VIP
    [root@rstx-53 certs]# vi apiserver-csr.json
    {
        "CN": "k8s-apiserver",
        "hosts": [
            "127.0.0.1",
            "10.254.0.1",
            "kubernetes.default",
            "kubernetes.default.svc",
            "kubernetes.default.svc.cluster",
            "kubernetes.default.svc.cluster.local",
            "192.168.1.200",
            "192.168.1.203",
            "192.168.1.204",
            "192.168.1.205"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "beijing",
                "L": "beijing",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    
    
    [root@rstx-53 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver
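    Optionally confirm every host above made it into the certificate's SANs (assumes openssl is installed):

    [root@rstx-53 certs]# openssl x509 -in apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"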
    
    [root@rstx-53 certs]# ll
    total 24
    -rw------- 1 root root 1679 Nov 10 17:42 apiserver-key.pem
    -rw-r--r-- 1 root root 1598 Nov 10 17:42 apiserver.pem
    -rw------- 1 root root 1679 Nov 10 17:41 ca-key.pem
    -rw-r--r-- 1 root root 1346 Nov 10 17:41 ca.pem
    -rw------- 1 root root 1675 Nov 10 17:41 client-key.pem
    -rw-r--r-- 1 root root 1363 Nov 10 17:41 client.pem
    
    
    Copy the certificates
    [root@rstx-203 ~]# cd /opt/kubernetes/server/bin/
    [root@rstx-203 bin]# mkdir certs
    [root@rstx-203 bin]# cd certs/
    Note the trailing dot below: it means copy into the current directory. Either copy everything in one command, or file by file:
    
    [root@rstx-203 certs]# scp -rp root@192.168.1.53:/opt/certs/{ca.pem,ca-key.pem,client.pem,client-key.pem,apiserver.pem,apiserver-key.pem}  .
    
    [root@rstx-203 certs]# scp rstx-53:/opt/certs/ca.pem . 
    [root@rstx-203 certs]# scp rstx-53:/opt/certs/ca-key.pem .
    [root@rstx-203 certs]# scp rstx-53:/opt/certs/client.pem .
    [root@rstx-203 certs]# scp rstx-53:/opt/certs/client-key.pem .
    [root@rstx-203 certs]# scp rstx-53:/opt/certs/apiserver.pem .
    [root@rstx-203 certs]# scp rstx-53:/opt/certs/apiserver-key.pem .
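    Optionally verify the copied certificates chain back to the CA (assumes openssl is installed; each file should report OK):

    [root@rstx-203 certs]# openssl verify -CAfile ca.pem client.pem apiserver.pem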
    
    Create the startup configuration (audit policy) -- upload directly
    [root@rstx-203 certs]# cd /opt/kubernetes/server/bin
    [root@rstx-203 bin]# mkdir conf
    [root@rstx-203 bin]# cd conf/
    [root@rstx-203 conf]# 
    [root@rstx-203 conf]# vi audit.yaml
    apiVersion: audit.k8s.io/v1beta1 # This is required.
    kind: Policy
    # Don't generate audit events for all requests in RequestReceived stage.
    omitStages:
      - "RequestReceived"
    rules:
      # Log pod changes at RequestResponse level
      - level: RequestResponse
        resources:
        - group: ""
          # Resource "pods" doesn't match requests to any subresource of pods,
          # which is consistent with the RBAC policy.
          resources: ["pods"]
      # Log "pods/log", "pods/status" at Metadata level
      - level: Metadata
        resources:
        - group: ""
          resources: ["pods/log", "pods/status"]
    
      # Don't log requests to a configmap called "controller-leader"
      - level: None
        resources:
        - group: ""
          resources: ["configmaps"]
          resourceNames: ["controller-leader"]
    
      # Don't log watch requests by the "system:kube-proxy" on endpoints or services
      - level: None
        users: ["system:kube-proxy"]
        verbs: ["watch"]
        resources:
        - group: "" # core API group
          resources: ["endpoints", "services"]
    
      # Don't log authenticated requests to certain non-resource URL paths.
      - level: None
        userGroups: ["system:authenticated"]
        nonResourceURLs:
        - "/api*" # Wildcard matching.
        - "/version"
    
      # Log the request body of configmap changes in kube-system.
      - level: Request
        resources:
        - group: "" # core API group
          resources: ["configmaps"]
        # This rule only applies to resources in the "kube-system" namespace.
        # The empty string "" can be used to select non-namespaced resources.
        namespaces: ["kube-system"]
    
      # Log configmap and secret changes in all other namespaces at the Metadata level.
      - level: Metadata
        resources:
        - group: "" # core API group
          resources: ["secrets", "configmaps"]
    
      # Log all other resources in core and extensions at the Request level.
      - level: Request
        resources:
        - group: "" # core API group
        - group: "extensions" # Version of group should NOT be included.
    
      # A catch-all rule to log all other requests at the Metadata level.
      - level: Metadata
        # Long-running requests like watches that fall under this rule will not
        # generate an audit event in RequestReceived.
        omitStages:
          - "RequestReceived"
    
    Write the startup script	-- upload directly
    [root@rstx-203 conf]# vi /opt/kubernetes/server/bin/kube-apiserver.sh
    #!/bin/bash
    ./kube-apiserver \
      --apiserver-count 2 \
      --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
      --audit-policy-file ./conf/audit.yaml \
      --authorization-mode RBAC \
      --client-ca-file ./certs/ca.pem \
      --requestheader-client-ca-file ./certs/ca.pem \
      --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
      --etcd-cafile ./certs/ca.pem \
      --etcd-certfile ./certs/client.pem \
      --etcd-keyfile ./certs/client-key.pem \
      --etcd-servers https://192.168.1.202:2379,https://192.168.1.203:2379,https://192.168.1.204:2379 \
      --service-account-key-file ./certs/ca-key.pem \
      --service-cluster-ip-range 10.254.0.0/16 \
      --service-node-port-range 3000-29999 \
      --target-ram-mb=1024 \
      --kubelet-client-certificate ./certs/client.pem \
      --kubelet-client-key ./certs/client-key.pem \
      --log-dir  /data/logs/kubernetes/kube-apiserver \
      --tls-cert-file ./certs/apiserver.pem \
      --tls-private-key-file ./certs/apiserver-key.pem \
      --v 2
    
    
    Check the help output for what each flag means, for example:
    [root@rstx-203 bin]# ./kube-apiserver --help|grep -A 5 target-ram-mb 
    
    Make the script executable
    [root@rstx-203 bin]# chmod +x kube-apiserver.sh
    
    [root@rstx-203 bin]# mkdir /data/logs/kubernetes/kube-apiserver -p
    
    Create the supervisord entry so it runs in the background
    [root@rstx-203 bin]# vi /etc/supervisord.d/kube-apiserver.ini
    [program:kube-apiserver-203]					; change the 203 suffix to match the host IP
    command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
    numprocs=1                                                      ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
    autostart=true                                                  ; start at supervisord start (default: true)
    autorestart=true                                                ; restart at unexpected quit (default: true)
    startsecs=30                                                    ; number of secs prog must stay running (def. 1)
    startretries=3                                                  ; max # of serial start failures (default 3)
    exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                       ; setuid to this UNIX account to run the program
    redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
    killasgroup=true
    stopasgroup=true
    
    
    [root@rstx-203 bin]# supervisorctl update
    
    [root@rstx-203 bin]# netstat -luntp | grep kube-api
    tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      27303/./kube-apiser 
    tcp6       0      0 :::6443                 :::*                    LISTEN      27303/./kube-apiser 
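    A quick smoke test, assuming the defaults shown above (insecure port 127.0.0.1:8080, secure port 6443):

    [root@rstx-203 bin]# supervisorctl status                      # kube-apiserver-203 should be RUNNING
    [root@rstx-203 bin]# curl http://127.0.0.1:8080/healthz       # expect: ok
    [root@rstx-203 bin]# curl -k https://127.0.0.1:6443/          # expect a 401/403 JSON Status body from the apiserver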
    
    

    4 Set up the Layer 4 reverse proxy and the VIP

    Install and deploy the Layer 4 reverse proxy for the control-plane nodes
    
    Deployed on rstx-201 and rstx-202: port 7443 on the VIP 192.168.1.200 reverse-proxies port 6443 of the apiservers on rstx-203 and rstx-204
    
    
    Run the following on both rstx-201 and rstx-202
    
    nginx configuration. Note: the stream {} block used below requires an nginx built with the stream module. openresty ships with it; a distro nginx may need the stream module package (e.g. nginx-mod-stream) instead.
    [root@rstx-201 ~]# yum install yum-utils
    
    [root@rstx-201 ~]# yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo
    [root@rstx-201 ~]# yum install openresty
    [root@rstx-201 ~]# ln -s /usr/local/openresty/nginx/conf /etc/nginx	# only if using openresty; its config lives under conf/
    
    Or, with the distro nginx:
    [root@rstx-201 ~]# yum install nginx -y
    
    [root@rstx-201 ~]# vi /etc/nginx/nginx.conf		-- upload directly; put the stream {} block at the top level, alongside (not inside) the http {} block
    stream {
        upstream kube-apiserver {
            server 192.168.1.203:6443     max_fails=3 fail_timeout=30s;
            server 192.168.1.204:6443     max_fails=3 fail_timeout=30s;
        }
        server {
            listen 7443;
            proxy_connect_timeout 2s;
            proxy_timeout 900s;
            proxy_pass kube-apiserver;
        }
    }
    
    
    Check the configuration file
    [root@rstx-201 ~]# nginx -t
    nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
    nginx: configuration file /etc/nginx/nginx.conf test is successful
    
    [root@rstx-201 ~]# systemctl start nginx
    [root@rstx-201 ~]# systemctl enable nginx
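    A quick check that the proxy actually forwards to an apiserver: the JSON Status error body in the reply comes from kube-apiserver itself, which proves the Layer 4 path works.

    [root@rstx-201 ~]# curl -k https://127.0.0.1:7443/            # expect a 401/403 JSON Status body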
    
    
    Install and configure keepalived
    [root@rstx-201 ~]# yum install keepalived -y
    
    Write the port-check script	-- upload directly
    [root@rstx-201 ~]# vi /etc/keepalived/check_port.sh	
    #!/bin/bash
    # keepalived port-check script
    # Usage, in keepalived.conf:
    # vrrp_script check_port {                        # define a vrrp_script check
    #     script "/etc/keepalived/check_port.sh 6379" # port to monitor
    #     interval 2                                  # check interval in seconds
    # }
    CHK_PORT=$1
    if [ -n "$CHK_PORT" ];then
            PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
            if [ $PORT_PROCESS -eq 0 ];then
                    echo "Port $CHK_PORT Is Not Used,End."
                    exit 1
            fi
    else
            echo "Check Port Cant Be Empty!"
            exit 1    # fail the check when no port argument is given
    fi
    
    [root@rstx-201 ~]# chmod +x /etc/keepalived/check_port.sh
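    A quick manual test of the script (it exits 0 when the port is listening, 1 otherwise):

    [root@rstx-201 ~]# /etc/keepalived/check_port.sh 7443; echo $?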
    
    
    Configure keepalived	-- delete the original file, upload and edit the new one
    keepalived master:
    [root@rstx-201 ~]# vi /etc/keepalived/keepalived.conf 
    ! Configuration File for keepalived
    
    global_defs {
       router_id 192.168.1.201
       script_user root
       enable_script_security
    }
    
    vrrp_script chk_nginx {
        script "/etc/keepalived/check_port.sh 7443"
        interval 2
        weight -20
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface eth0	# change to match the actual NIC
        virtual_router_id 251
        priority 100
        advert_int 1
        mcast_src_ip 192.168.1.201
        nopreempt
    
        authentication {
            auth_type PASS
            auth_pass 11111111
        }
        track_script {
             chk_nginx
        }
        virtual_ipaddress {
            192.168.1.200
        }
    }
    
    
    
    keepalived backup:    -- upload directly
    [root@rstx-202 ~]# vi /etc/keepalived/keepalived.conf 
    ! Configuration File for keepalived
    global_defs {
            router_id 192.168.1.202
    	script_user root
            enable_script_security 
    }
    vrrp_script chk_nginx {
            script "/etc/keepalived/check_port.sh 7443"
            interval 2
            weight -20
    }
    vrrp_instance VI_1 {
            state BACKUP
            interface eth0		# change to match the actual NIC
            virtual_router_id 251
            mcast_src_ip 192.168.1.202
            priority 90
            advert_int 1
            authentication {
                    auth_type PASS
                    auth_pass 11111111
            }
            track_script {
                    chk_nginx
            }
            virtual_ipaddress {
                    192.168.1.200
            }
    }
    
    
    [root@rstx-201 keepalived]# ss -lnt|grep 7443|wc -l         
    1
    
    [root@rstx-201 ~]# systemctl start keepalived
    [root@rstx-201 ~]# systemctl enable keepalived
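    Confirm the VIP landed on the master (interface eth0 as configured above); on the backup the same command should print nothing:

    [root@rstx-201 ~]# ip addr show eth0 | grep 192.168.1.200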
    
    
    Note: because nopreempt is set, the VIP does not fail back automatically. If the VIP has moved, then after the master keepalived host recovers, make sure port 7443 is listening and the service is healthy on it, and only then restart keepalived to move the VIP back to the master
    [root@rstx-201 ~]# netstat -luntp | grep 7443
    tcp        0      0 0.0.0.0:7443            0.0.0.0:*               LISTEN      22071/nginx: master 
    
    

    Calling the apiserver API

    [root@master ~]# kubectl proxy 
    Starting to serve on 127.0.0.1:8001
    
    [root@master ~]# curl 127.0.0.1:8001/api/v1/namespaces/default/pods
    {
      "kind": "PodList",
      "apiVersion": "v1",
      "metadata": {
        "resourceVersion": "152151"
      },
      "items": [
        {
          "metadata": {
            "name": "nginx-6799fc88d8-d6ktm",
            "generateName": "nginx-6799fc88d8-",
            "namespace": "default",
            "uid": "79b492da-40f7-4529-96ea-e7c72d341ec6",
            "resourceVersion": "120158",
            "creationTimestamp": "2021-07-30T01:24:06Z",
            "labels": {
              "app": "nginx",
              "pod-template-hash": "6799fc88d8"
            },
            "ownerReferences": [
              {
                "apiVersion": "apps/v1",
                "kind": "ReplicaSet",
                "name": "nginx-6799fc88d8",
                "uid": "1464e711-21a2-427e-93f9-2e9edabe5087",
                "controller": true,
                "blockOwnerDeletion": true
              }
            ],
            "managedFields": [
              {
                "manager": "kube-controller-manager",
                "operation": "Update",
                "apiVersion": "v1",
                "time": "2021-07-30T01:24:06Z",
                "fieldsType": "FieldsV1",
                "fieldsV1": {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:app":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{"uid":"1464e711-21a2-427e-93f9-2e9edabe5087"}":{".":{},"f:apiVersion":{},"f:blockOwnerDeletion":{},"f:controller":{},"f:kind":{},"f:name":{},"f:uid":{}}}},"f:spec":{"f:containers":{"k:{"name":"nginx"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}
              },
              {
                "manager": "kubelet",
                "operation": "Update",
                "apiVersion": "v1",
                "time": "2021-07-30T01:24:06Z",
                "fieldsType": "FieldsV1",
                "fieldsV1": {"f:status":{"f:conditions":{"k:{"type":"ContainersReady"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{"type":"Initialized"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{"type":"Ready"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}}
              }
            ]
          },
    
    