

    Advanced Kubernetes Tutorial

    I. Service Probes

    For online business, keeping services stable and healthy is the top priority; handling faulty services promptly to limit business impact, and recovering quickly, have always been hard problems for development and operations. Kubernetes provides health checks: a service detected as faulty is automatically taken offline in time, and the service recovers automatically by being restarted.

    1. Liveness Probes (LivenessProbe)

    A liveness probe determines whether the container is alive, i.e. whether the Pod is in the running state. If the LivenessProbe detects that the container is unhealthy, the kubelet kills the container and, according to the container's restart policy, decides how to restart it. If a container does not define a LivenessProbe, the kubelet treats the LivenessProbe as always returning success. Liveness probes support three methods: ExecAction, TCPSocketAction, and HTTPGetAction.

    Note: a liveness check detects whether the container is able to run normally.
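
    All three handler types hang off the same livenessProbe field, and a probe may set exactly one of them. A minimal sketch (the command, path, and port here are illustrative, not taken from the examples below):

    # Exec: run a command in the container; exit code 0 means healthy
    livenessProbe:
      exec:
        command: ["cat", "/tmp/healthy"]
    # TCPSocket: healthy if a TCP connection to the port succeeds
    livenessProbe:
      tcpSocket:
        port: 80
    # HTTPGet: healthy if the HTTP response status is in the 200-399 range
    livenessProbe:
      httpGet:
        path: /
        port: 80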

    1.1. Exec

    # Liveness probe, Exec method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Liveness probe
              livenessProbe:
                exec:
                  command:
                    - cat
                    - /usr/share/nginx/html/index.html
                initialDelaySeconds: 8 # start probing 8s after the container starts
                timeoutSeconds: 3 # timeout for each probe; defaults to 1 second
                failureThreshold: 2 # two consecutive failures mark the probe as failed
                periodSeconds: 3 # probe frequency (every 3s)
                successThreshold: 1 # consecutive successes required; must be 1 for a liveness probe

      replicas: 2

    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl apply -f livenessprobe_exec_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get svc
    NAME                  TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
    kubernetes            ClusterIP   10.96.0.1       <none>        443/TCP        41d
    nginx                 NodePort    10.96.106.13    <none>        80:26755/TCP   32d
    test-deployment-svc   NodePort    10.96.120.163   <none>        80:30001/TCP   5s
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-6fb857b989-84pbb   1/1     Running   0          14s
    test-deployment-6fb857b989-bvdqx   1/1     Running   0          14s
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-6fb857b989-84pbb 
    Name:         test-deployment-6fb857b989-84pbb
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-02/172.16.0.54
    Start Time:   Sat, 13 Mar 2021 00:35:41 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=6fb857b989
    Annotations:  <none>
    Status:       Running
    IP:           10.242.176.2
    IPs:
      IP:           10.242.176.2
    Controlled By:  ReplicaSet/test-deployment-6fb857b989
    Containers:
      nginx:
        Container ID:   docker://ff571a7c8a31fecf884fc10dfc26cbed0c9a2a9b78a3065afeecd12f38c3a946
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Sat, 13 Mar 2021 00:35:42 +0800
        Ready:          True
        Restart Count:  0
        Liveness:       exec [cat /usr/share/nginx/html/index.html] delay=8s timeout=3s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age   From               Message
      ----    ------     ----  ----               -------
      Normal  Scheduled  31s   default-scheduler  Successfully assigned default/test-deployment-6fb857b989-84pbb to kubernetes-node-02
      Normal  Pulled     30s   kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    30s   kubelet            Created container nginx
      Normal  Started    30s   kubelet            Started container nginx
    
    # Enter the pod and inspect its contents
    [root@kubernetes-master-01 ~]# kubectl exec -it test-deployment-6fb857b989-84pbb -- bash
    root@test-deployment-6fb857b989-84pbb:/# cd /usr/share/nginx/html/
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# ls
    50x.html  index.html
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html#
    


    ## Delete index.html from the nginx html directory inside the pod
    [root@kubernetes-master-01 ~]# kubectl exec -it test-deployment-6fb857b989-84pbb -- bash
    root@test-deployment-6fb857b989-84pbb:/# cd /usr/share/nginx/html/
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# ls
    50x.html  index.html
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# 
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# rm -rf index.html 
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# command terminated with exit code 137
        
        
    # Check again (the pod's container was restarted: a brand-new container was started)
    [root@kubernetes-master-01 ~]# kubectl exec -it test-deployment-6fb857b989-84pbb -- bash
    root@test-deployment-6fb857b989-84pbb:/# cd /usr/share/nginx/html/
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# ll
    bash: ll: command not found
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# ls
    50x.html  index.html
    root@test-deployment-6fb857b989-84pbb:/usr/share/nginx/html# 
    


    1.2. TCPSocket

    # Liveness probe, TCPSocket method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Liveness probe
              livenessProbe:
                tcpSocket:
                  port: 80

                initialDelaySeconds: 8
                timeoutSeconds: 3

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl apply -f livenessprobe_tctsocket_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get pods
    NAME                               READY   STATUS    RESTARTS   AGE
    nginx-86c57db685-xnstd             1/1     Running   5          32d
    nginx-pod                          1/1     Running   4          5d7h
    test-deployment-5596f9cbc7-nsxx8   1/1     Running   0          6s
    test-deployment-5596f9cbc7-t4hv4   1/1     Running   0          6s
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-5596f9cbc7-nsxx8   1/1     Running   0          16s
    test-deployment-5596f9cbc7-t4hv4   1/1     Running   0          16s
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-5596f9cbc7-nsxx8   1/1     Running   0          51s
    test-deployment-5596f9cbc7-t4hv4   1/1     Running   0          51s
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-5596f9cbc7-nsxx8 
    Name:         test-deployment-5596f9cbc7-nsxx8
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-01/172.16.0.53
    Start Time:   Sat, 13 Mar 2021 00:51:14 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=5596f9cbc7
    Annotations:  <none>
    Status:       Running
    IP:           10.241.56.2
    IPs:
      IP:           10.241.56.2
    Controlled By:  ReplicaSet/test-deployment-5596f9cbc7
    Containers:
      nginx:
        Container ID:   docker://b238423c7b7e9148fc513ff553bdb2ff8599ae8b71966d923ffef9e49ccdc539
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Sat, 13 Mar 2021 00:51:15 +0800
        Ready:          True
        Restart Count:  0
        Liveness:       tcp-socket :80 delay=8s timeout=3s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age   From               Message
      ----    ------     ----  ----               -------
      Normal  Scheduled  66s   default-scheduler  Successfully assigned default/test-deployment-5596f9cbc7-nsxx8 to kubernetes-node-01
      Normal  Pulled     66s   kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    66s   kubelet            Created container nginx
      Normal  Started    66s   kubelet            Started container nginx
    
    


    Change the probe to a wrong port

    # Liveness probe, TCPSocket method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Liveness probe
              livenessProbe:
                tcpSocket:
                  port: 8099 # deliberately probe a port nothing listens on

                initialDelaySeconds: 8
                timeoutSeconds: 3

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl apply -f livenessprobe_tctsocket_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-6c945c9cdc-n5zsl   1/1     Running   0          13s
    test-deployment-6c945c9cdc-zmqsq   1/1     Running   0          13s
    
    # Restarted twice already
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-6c945c9cdc-n5zsl   1/1     Running   2          79s
    test-deployment-6c945c9cdc-zmqsq   1/1     Running   2          79s
    
    
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-6c945c9cdc-n5zsl   1/1     Running   5          3m26s
    test-deployment-6c945c9cdc-zmqsq   1/1     Running   5          3m26s
    


    1.3. HTTPGet

    # Liveness probe, HTTPGet method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Liveness probe
              livenessProbe:
                httpGet:
                  port: 80
                  path: /
                  host: 127.0.0.1 # usually omitted; when host is unset the kubelet probes the pod IP
                  scheme: HTTP

      # Number of pod replicas
      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    


    [root@kubernetes-master-01 ~]# kubectl delete -f deployment.yaml 
    deployment.apps "test-deployment" deleted
    service "test-deployment-svc" deleted
    [root@kubernetes-master-01 ~]# kubectl apply -f deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get pods -l pro=dev
    No resources found in default namespace.
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                              READY   STATUS    RESTARTS   AGE
    test-deployment-8c6b557b5-bfdjm   1/1     Running   0          27s
    test-deployment-8c6b557b5-vpjz6   1/1     Running   0          27s
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-8c6b557b5-bfdjm 
    Name:         test-deployment-8c6b557b5-bfdjm
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-01/172.16.0.53
    Start Time:   Sat, 13 Mar 2021 00:05:43 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=8c6b557b5
    Annotations:  <none>
    Status:       Running
    IP:           10.241.56.2
    IPs:
      IP:           10.241.56.2
    Controlled By:  ReplicaSet/test-deployment-8c6b557b5
    Containers:
      nginx:
        Container ID:   docker://d213a624a6c79cbeef234b565ca0430fa3be4713fbdd3f31afd1da01ba51dbbc
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Sat, 13 Mar 2021 00:05:45 +0800
        Ready:          True
        Restart Count:  0
        Liveness:       http-get http://:80/ delay=0s timeout=1s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age    From               Message
      ----    ------     ----   ----               -------
      Normal  Scheduled  2m11s  default-scheduler  Successfully assigned default/test-deployment-8c6b557b5-bfdjm to kubernetes-node-01
      Normal  Pulled     2m11s  kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    2m10s  kubelet            Created container nginx
      Normal  Started    2m10s  kubelet            Started container nginx
    
    


    Change the probe to a wrong port

    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy
              # Liveness probe
              livenessProbe:
                httpGet:
                  port: 8080 # probe a port that does not exist
                  path: /

      # Number of pod replicas
      replicas: 2

    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl delete -f deployment.yaml 
    deployment.apps "test-deployment" deleted
    service "test-deployment-svc" deleted
    [root@kubernetes-master-01 ~]# kubectl apply -f deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get pods dev=pro
    Error from server (NotFound): pods "dev=pro" not found
    [root@kubernetes-master-01 ~]# kubectl get pods -l  dev=pro
    NAME                               READY   STATUS              RESTARTS   AGE
    test-deployment-5b78dcf669-2nvwm   0/1     ContainerCreating   0          23s
    test-deployment-5b78dcf669-cd6xd   1/1     Running             0          23s
    [root@kubernetes-master-01 ~]# kubectl get pods -l  dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-5b78dcf669-2nvwm   1/1     Running   0          47s
    test-deployment-5b78dcf669-cd6xd   1/1     Running   1          47s
    [root@kubernetes-master-01 ~]# kubectl get pods -l  dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-5b78dcf669-2nvwm   1/1     Running   1          96s
    test-deployment-5b78dcf669-cd6xd   1/1     Running   3          96s
    
    # Watch the pod status: the pods keep restarting
    [root@kubernetes-master-01 ~]# kubectl get pods -l  dev=pro
    NAME                               READY   STATUS             RESTARTS   AGE
    test-deployment-5b78dcf669-2nvwm   0/1     CrashLoopBackOff   6          7m40s
    test-deployment-5b78dcf669-cd6xd   0/1     CrashLoopBackOff   6          7m40s
    
    
    
    # Inspect the result
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-5b78dcf669-2nvwm 
    Name:         test-deployment-5b78dcf669-2nvwm
    Namespace:    default
    Priority:     0
    Node:         kubernetes-master-03/172.16.0.52
    Start Time:   Sat, 13 Mar 2021 00:13:17 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=5b78dcf669
    Annotations:  <none>
    Status:       Running
    IP:           10.241.136.3
    IPs:
      IP:           10.241.136.3
    Controlled By:  ReplicaSet/test-deployment-5b78dcf669
    Containers:
      nginx:
        Container ID:   docker://f61694864cf7c9faf1ca1e7ffede5297dc14ffae0acd2a42138bc744e8fc85f6
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Waiting
          Reason:       CrashLoopBackOff
        Last State:     Terminated
          Reason:       Completed
          Exit Code:    0
          Started:      Sat, 13 Mar 2021 00:15:58 +0800
          Finished:     Sat, 13 Mar 2021 00:16:28 +0800
        Ready:          False
        Restart Count:  4
        Liveness:       http-get http://:8080/ delay=0s timeout=1s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             False 
      ContainersReady   False 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type     Reason     Age                   From               Message
      ----     ------     ----                  ----               -------
      Normal   Scheduled  3m46s                 default-scheduler  Successfully assigned default/test-deployment-5b78dcf669-2nvwm to kubernetes-master-03
      Normal   Pulled     95s (x4 over 3m5s)    kubelet            Container image "nginx:latest" already present on machine
      Normal   Created    95s (x4 over 3m5s)    kubelet            Created container nginx
      Normal   Started    95s (x4 over 3m4s)    kubelet            Started container nginx
      Normal   Killing    95s (x3 over 2m35s)   kubelet            Container nginx failed liveness probe, will be restarted
      Warning  Unhealthy  85s (x10 over 2m55s)  kubelet            Liveness probe failed: Get http://10.241.136.3:8080/: dial tcp 10.241.136.3:8080: connect: connection refused
    
    


    1.4. Parameter Reference

    • failureThreshold: minimum number of consecutive failed probes for the probe to be considered failed. Defaults to 3; minimum value is 1.
    • initialDelaySeconds: number of seconds after the container has started before probing begins. If unset, probing starts immediately.
    • periodSeconds: how often (in seconds) to perform the probe. Defaults to 10 seconds; minimum value is 1.
    • successThreshold: minimum number of consecutive successful probes, after a failure, for the probe to be considered successful again. (For a liveness probe this must be 1. Minimum value is 1.)
    • timeoutSeconds: timeout for each probe; defaults to 1 second.
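
    Put together, these tuning parameters attach alongside whichever handler type is used. A hedged sketch with illustrative values:

    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5  # wait 5s after container start before probing
      periodSeconds: 10       # probe every 10s
      timeoutSeconds: 3       # a single probe fails if it takes longer than 3s
      failureThreshold: 3     # restart the container after 3 consecutive failures
      successThreshold: 1     # must be 1 for liveness probes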

    2. Readiness Probes (ReadinessProbe)

    A readiness probe determines whether the container can serve traffic normally, i.e. whether the container's Ready condition is True and it can accept requests. If the ReadinessProbe fails, the container's Ready condition is set to False and the controller removes this Pod's endpoint from the Endpoints list of the matching Service, so no further requests are routed to this Pod until the next successful probe. (The Pod is taken out of rotation: it receives no requests and no traffic is forwarded to it, but it is not restarted.)

    Note: a readiness check probes whether the service can currently serve external traffic.
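
    One way to watch this behaviour is to follow the Service's endpoint list while a readiness probe fails (a sketch; the Service name and label match the examples below):

    # Pods that fail readiness disappear from the endpoint list
    kubectl get endpoints test-deployment-svc -w
    # The READY column drops to 0/1 while the probe is failing
    kubectl get pods -l dev=pro -w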


    2.1. HTTPGet

    Probes whether the current Pod can serve external traffic by requesting a URL.
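
    Once the Service below is applied, the same URL can also be checked from outside the cluster through the NodePort (a sketch; replace <node-ip> with the address of any node):

    curl http://<node-ip>:30001/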

    # Readiness probe, HTTPGet method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Readiness probe
              readinessProbe:
                httpGet:
                  port: 80
                  path: /
                  # host: 127.0.0.1  setting this IP makes the probe fail; the probe targets the pod IP

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    # Create the pods
    [root@kubernetes-master-01 ~]# kubectl apply -f readinessprobe_httpget_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    # Check the created pods
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-7bc4d875fc-ghwpb   1/1     Running   0          31s
    test-deployment-7bc4d875fc-v4w6r   1/1     Running   0          31s
    
    # Inspect the pod details
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-7bc4d875fc-ghwpb 
    Name:         test-deployment-7bc4d875fc-ghwpb
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-01/172.16.0.53
    Start Time:   Mon, 15 Mar 2021 08:21:23 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=7bc4d875fc
    Annotations:  <none>
    Status:       Running
    IP:           10.241.32.2
    IPs:
      IP:           10.241.32.2
    Controlled By:  ReplicaSet/test-deployment-7bc4d875fc
    Containers:
      nginx:
        Container ID:   docker://c24f2345de24d32314dee1d1e55c698d849618b692fbf953e0dbd28e1f587f39
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Mon, 15 Mar 2021 08:21:24 +0800
        Ready:          True 
        Restart Count:  0
        Readiness:      http-get http://:80/ delay=0s timeout=1s period=10s #success=1 #failure=3   # readiness probe, HTTPGet method
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True  # the probe result is True
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age   From               Message
      ----    ------     ----  ----               -------
      Normal  Scheduled  2m    default-scheduler  Successfully assigned default/test-deployment-7bc4d875fc-ghwpb to kubernetes-node-01
      Normal  Pulled     119s  kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    119s  kubelet            Created container nginx
      Normal  Started    119s  kubelet            Started container nginx
    
     
    
    # Inspect the Service
    [root@kubernetes-master-01 ~]# kubectl describe svc test-deployment-svc 
    Name:                     test-deployment-svc
    Namespace:                default
    Labels:                   <none>
    Annotations:              Selector:  app=nginx,dev=pro
    Type:                     NodePort
    IP:                       10.96.27.250
    Port:                     <unset>  80/TCP
    TargetPort:               80/TCP
    NodePort:                 <unset>  30001/TCP
    Endpoints:                10.241.216.3:80,10.241.32.3:80
    Session Affinity:         None
    External Traffic Policy:  Cluster
    Events:                   <none>
    
    


    Change the probe to a wrong port

    # Readiness probe, HTTPGet method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Readiness probe
              readinessProbe:
                httpGet:
                  port: 8080 # deliberately probe a wrong port
                  path: /

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl apply -f readinessprobe_httpget_deployment.yaml 
    deployment.apps/test-deployment configured
    service/test-deployment-svc unchanged
    
    
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-5975846857-4f6xq   0/1     Running   0          10s
    test-deployment-5975846857-h6fp9   0/1     Running   0          11s
    
    
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-5975846857-7x7qp 
    Name:         test-deployment-5975846857-7x7qp
    Namespace:    default
    Priority:     0
    Node:         kubernetes-master-03/172.16.0.52
    Start Time:   Mon, 15 Mar 2021 08:30:42 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=5975846857
    Annotations:  <none>
    Status:       Running
    IP:           10.242.176.3
    IPs:
      IP:           10.242.176.3
    Controlled By:  ReplicaSet/test-deployment-5975846857
    Containers:
      nginx:
        Container ID:   docker://4dd97b86321fb715b4003495559012cfe579ef400e1fbdd15c00011f37849ba6
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Mon, 15 Mar 2021 08:30:43 +0800
        Ready:          False
        Restart Count:  0
        Readiness:      http-get http://:8080/ delay=0s timeout=1s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             False 
      ContainersReady   False 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type     Reason     Age               From               Message
      ----     ------     ----              ----               -------
      Normal   Scheduled  41s               default-scheduler  Successfully assigned default/test-deployment-5975846857-7x7qp to kubernetes-master-03
      Normal   Pulled     40s               kubelet            Container image "nginx:latest" already present on machine
      Normal   Created    40s               kubelet            Created container nginx
      Normal   Started    40s               kubelet            Started container nginx
      Warning  Unhealthy  5s (x4 over 35s)  kubelet            Readiness probe failed: Get http://10.242.176.3:8080/: dial tcp 10.242.176.3:8080: connect: connection refused
    
                                
    
    
    [root@kubernetes-master-01 ~]# kubectl describe svc test-deployment-svc 
    Name:                     test-deployment-svc
    Namespace:                default
    Labels:                   <none>
    Annotations:              Selector:  app=nginx,dev=pro
    Type:                     NodePort
    IP:                       10.96.27.250
    Port:                     <unset>  80/TCP
    TargetPort:               80/TCP
    NodePort:                 <unset>  30001/TCP
    Endpoints:                
    Session Affinity:         None
    External Traffic Policy:  Cluster
    Events:                   <none>
    
    


    2.2. Exec

    Probes whether the service can serve external traffic by executing a command.

    
    # Readiness probe, Exec method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Readiness probe
              readinessProbe:
                exec:
                  command:
                    - cat
                    - /usr/share/nginx/html/index.html

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl apply -f readinessprobe_exec_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-6559f99565-2dkqg   1/1     Running   0          13s
    test-deployment-6559f99565-5rq2l   0/1     Running   0          13s
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-6559f99565-2dkqg   1/1     Running   0          19s
    test-deployment-6559f99565-5rq2l   1/1     Running   0          19s
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-6559f99565-2dkqg 
    Name:         test-deployment-6559f99565-2dkqg
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-02/172.16.0.54
    Start Time:   Mon, 15 Mar 2021 22:13:13 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=6559f99565
    Annotations:  <none>
    Status:       Running
    IP:           10.241.216.2
    IPs:
      IP:           10.241.216.2
    Controlled By:  ReplicaSet/test-deployment-6559f99565
    Containers:
      nginx:
        Container ID:   docker://f775b67819ec8f985fffe8b29b5d4eb92c9a9649142b7feaa4aa67feab31c986
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Mon, 15 Mar 2021 22:13:14 +0800
        Ready:          True
        Restart Count:  0
        Readiness:      exec [cat /usr/share/nginx/html/index.html] delay=0s timeout=1s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age   From               Message
      ----    ------     ----  ----               -------
      Normal  Scheduled  30s   default-scheduler  Successfully assigned default/test-deployment-6559f99565-2dkqg to kubernetes-node-02
      Normal  Pulled     30s   kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    30s   kubelet            Created container nginx
      Normal  Started    30s   kubelet            Started container nginx
    
    


    2.3. TCPSocket

    Probes whether the service can serve external traffic by checking that a TCP connection to a port succeeds.

    # Readiness probe, TCPSocket method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Readiness probe
              readinessProbe:
                tcpSocket:
                  port: 80

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    [root@kubernetes-master-01 ~]# kubectl apply -f readinessprobe_tcpsocket_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-8578584455-b5pg5   0/1     Running   0          9s
    test-deployment-8578584455-dkx4d   1/1     Running   0          9s
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-8578584455-b5pg5   1/1     Running   0          50s
    test-deployment-8578584455-dkx4d   1/1     Running   0          50s
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-8578584455-b5pg5 
    Name:         test-deployment-8578584455-b5pg5
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-02/172.16.0.54
    Start Time:   Mon, 15 Mar 2021 22:17:59 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=8578584455
    Annotations:  <none>
    Status:       Running
    IP:           10.241.216.2
    IPs:
      IP:           10.241.216.2
    Controlled By:  ReplicaSet/test-deployment-8578584455
    Containers:
      nginx:
        Container ID:   docker://a41f30b00fef1467074c2ecb1fe9137d8a2b29f606f7ad7712f7081ebb0bb568
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Mon, 15 Mar 2021 22:18:00 +0800
        Ready:          True
        Restart Count:  0
        Readiness:      tcp-socket :80 delay=0s timeout=1s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age   From               Message
      ----    ------     ----  ----               -------
      Normal  Scheduled  63s   default-scheduler  Successfully assigned default/test-deployment-8578584455-b5pg5 to kubernetes-node-02
      Normal  Pulled     63s   kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    63s   kubelet            Created container nginx
      Normal  Started    62s   kubelet            Started container nginx
    
    


    3. Lifecycle Hooks

    Kubernetes in fact provides lifecycle hooks for our containers, the Pod Hooks. A Pod hook is initiated by the kubelet and runs as the process in the container starts or just before it terminates; hooks are part of the container lifecycle, and every container in a Pod can have its own hooks configured. Kubernetes provides two hook functions:

    • PostStart (fires the instant the container is created): this hook executes immediately after the container is created. It is not guaranteed to run before the container's ENTRYPOINT, and no parameters are passed to the handler. It is mainly used for resource deployment, environment preparation, and similar tasks. Note that if the hook takes too long or hangs, the container cannot reach the running state. (Useful, for example, for downloading secrets at startup.)
    • PreStop: this hook is called immediately before the container terminates. It is blocking, meaning synchronous, so it must complete before the call to delete the container can be issued. It is mainly used for shutting the application down gracefully, notifying other systems, and so on. If the hook hangs during execution, the Pod phase stays in Running and never reaches Failed. (Useful, for example, for deleting secrets when the container stops, to keep confidential data relatively safe.)

    If a PostStart or PreStop hook fails, it kills the container, so hook functions should be as lightweight as possible. Of course, there are cases where a long-running command is justified, for instance saving state before the container stops.

    apiVersion: v1
    kind: Pod
    metadata:
      name: hook-demo1
    spec:
      containers:
        - name: hook-demo
          image: nginx
          lifecycle:
            postStart:
              exec:
                command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
    
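    The pod above only wires up postStart; a preStop handler attaches in the same place. A hedged sketch (the pod name is illustrative; nginx -s quit is the conventional graceful-shutdown command):

    apiVersion: v1
    kind: Pod
    metadata:
      name: hook-demo2
    spec:
      containers:
        - name: hook-demo
          image: nginx
          lifecycle:
            preStop:
              exec:
                # let nginx drain in-flight requests before the container is removed
                command: ["/usr/sbin/nginx", "-s", "quit"]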

    Example

    # Lifecycle hook, PostStart method
    # Resource type
    kind: Deployment
    # API version
    apiVersion: apps/v1
    # Metadata
    metadata:
      namespace: default
      name: test-deployment
      labels:
        app: test-deployment
    spec:
      # Selector
      selector:
        # Exact match
        matchLabels:
          app: nginx
          dev: pro

      # Pod template
      template:
        metadata:
          # Must carry at least the matchLabels labels; extra labels are allowed
          labels:
            app: nginx
            dev: pro
            name: randysun
        spec:
          containers:
            - name: nginx # container name
              image: nginx:latest
              imagePullPolicy: IfNotPresent # image pull policy

              # Liveness probe
              livenessProbe:
                exec:
                  command:
                    - cat
                    - /usr/share/nginx/html/demo.html
              # Lifecycle hook
              lifecycle:
                postStart: # runs immediately after the container is created
                  exec:
                    command:
                      - touch # the command must already exist inside the container
                      - /usr/share/nginx/html/demo.html

      replicas: 2
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-deployment-svc

    spec:
      type: NodePort
      ports:
        - port: 80
          targetPort: 80
          nodePort: 30001
      selector: # the pod labels must include every key listed here; extra pod labels are fine
        app: nginx
        dev: pro
    
    

    Run it

    # Create the pods
    [root@kubernetes-master-01 ~]# kubectl apply -f hook_poststart_deployment.yaml 
    deployment.apps/test-deployment created
    service/test-deployment-svc created
    # Check the pods
    [root@kubernetes-master-01 ~]# kubectl get pods -l dev=pro
    NAME                               READY   STATUS    RESTARTS   AGE
    test-deployment-68cffdb664-258f6   1/1     Running   0          8s
    test-deployment-68cffdb664-2pj89   1/1     Running   0          8s
    
    # Inspect the details
    [root@kubernetes-master-01 ~]# kubectl describe pods test-deployment-68cffdb664-2pj89 
    Name:         test-deployment-68cffdb664-2pj89
    Namespace:    default
    Priority:     0
    Node:         kubernetes-node-02/172.16.0.54
    Start Time:   Mon, 15 Mar 2021 08:57:41 +0800
    Labels:       app=nginx
                  dev=pro
                  name=randysun
                  pod-template-hash=68cffdb664
    Annotations:  <none>
    Status:       Running
    IP:           10.241.216.2
    IPs:
      IP:           10.241.216.2
    Controlled By:  ReplicaSet/test-deployment-68cffdb664
    Containers:
      nginx:
        Container ID:   docker://5dbbb508735a9535f209c524935d50b2a5d490755e0a5cf3ebfb473ef93b67cc
        Image:          nginx:latest
        Image ID:       docker-pullable://nginx@sha256:10b8cc432d56da8b61b070f4c7d2543a9ed17c2b23010b43af434fd40e2ca4aa
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Mon, 15 Mar 2021 08:57:42 +0800
        Ready:          True
        Restart Count:  0
        Liveness:       exec [cat /usr/share/nginx/html/demo.html] delay=0s timeout=1s period=10s #success=1 #failure=3
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-xt6cs (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-xt6cs:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-xt6cs
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 360s
                     node.kubernetes.io/unreachable:NoExecute for 360s
    Events:
      Type    Reason     Age   From               Message
      ----    ------     ----  ----               -------
      Normal  Scheduled  78s   default-scheduler  Successfully assigned default/test-deployment-68cffdb664-2pj89 to kubernetes-node-02
      Normal  Pulled     78s   kubelet            Container image "nginx:latest" already present on machine
      Normal  Created    78s   kubelet            Created container nginx
      Normal  Started    78s   kubelet            Started container nginx
        
    # Enter the container and check the demo.html file created by the hook
    [root@kubernetes-master-01 ~]# kubectl exec -it test-deployment-68cffdb664-2pj89 -- bash
    root@test-deployment-68cffdb664-2pj89:/# cd /usr/share/nginx/html/
    root@test-deployment-68cffdb664-2pj89:/usr/share/nginx/html# ls
    50x.html  demo.html  index.html
    
    root@test-deployment-68cffdb664-2pj89:/usr/share/nginx/html# ls -l
    total 8
    -rw-r--r--. 1 root root 494 Dec 15 13:59 50x.html
    -rw-r--r--. 1 root root   0 Mar 15 00:57 demo.html
    -rw-r--r--. 1 root root 612 Dec 15 13:59 index.html
    root@test-deployment-68cffdb664-2pj89:/usr/share/nginx/html# date
    Mon Mar 15 01:00:43 UTC 2021
    root@test-deployment-68cffdb664-2pj89:/usr/share/nginx/html# 
    
    


    II. The K8S Monitoring Component metrics-server

    1. Create a User

    metrics-server needs to read data from Kubernetes, so a user with sufficient permissions must be created for metrics-server to use. (Note: the binding below grants cluster-admin to system:anonymous, which is convenient in a lab but unsafe for production.)

    [root@kubernetes-master-01 ~]#  kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
    
    

    2. Create the Manifest

    # K8S monitoring component: metrics-server
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: system:aggregated-metrics-reader
      labels:
        rbac.authorization.k8s.io/aggregate-to-view: "true"
        rbac.authorization.k8s.io/aggregate-to-edit: "true"
        rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rules:
      - apiGroups: ["metrics.k8s.io"]
        resources: ["pods", "nodes"]
        verbs: ["get", "list", "watch"]
    
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: metrics-server:system:auth-delegator
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:auth-delegator
    
    subjects:
      - kind: ServiceAccount
        name: metrics-server
        namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: metrics-server-auth-reader
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: extension-apiserver-authentication-reader
    subjects:
      - kind: ServiceAccount
        name: metrics-server
        namespace: kube-system
    
    ---
    apiVersion: apiregistration.k8s.io/v1
    kind: APIService
    metadata:
      name: v1beta1.metrics.k8s.io
    spec:
      groupPriorityMinimum: 100
      service:
        name: metrics-server
        namespace: kube-system
      versionPriority: 100
      group: metrics.k8s.io
      version: v1beta1
      insecureSkipTLSVerify: true
    
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: metrics-server
      namespace: kube-system
    
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: metrics-server
      namespace: kube-system
      labels:
        k8s-app: metrics-server
    spec:
      selector:
        matchLabels:
          k8s-app: metrics-server
      template:
        metadata:
          name: metrics-server
          labels:
            k8s-app: metrics-server
        spec:
          serviceAccountName: metrics-server
          volumes:
            # mount in tmp so we can safely use from-scratch images and/or read-only containers
    
            - name: tmp-dir
              emptyDir: {}
            - name: ca-ssl
              hostPath:
                path: /etc/kubernetes/ssl
          containers:
            - name: metrics-server
              image: registry.cn-hangzhou.aliyuncs.com/k8sos/metrics-server:v0.4.1
              imagePullPolicy: IfNotPresent
              args:
                - --cert-dir=/tmp
                - --secure-port=4443
                - --metric-resolution=30s
                - --kubelet-insecure-tls
                - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
                - --requestheader-username-headers=X-Remote-User
                - --requestheader-group-headers=X-Remote-Group
                - --requestheader-extra-headers-prefix=X-Remote-Extra-
    
              ports:
                - name: main-port
                  containerPort: 4443
                  protocol: TCP
    
              securityContext:
                readOnlyRootFilesystem: true
                runAsNonRoot: true
                runAsUser: 1000
              volumeMounts:
                - name: tmp-dir
                  mountPath: /tmp
    
                - name: ca-ssl
                  mountPath: /etc/kubernetes/ssl
          nodeSelector:
            kubernetes.io/os: linux
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: metrics-server
      namespace:  kube-system
      labels:
        kubernetes.io/name: "Metrics-server"
        kubernetes.io/cluster-service: "true"
    spec:
      selector:
        k8s-app: metrics-server
      ports:
        - port: 443
          protocol: TCP
          targetPort: main-port
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: system:metrics-server
    rules:
      - apiGroups:
        - ""
        resources:
        - pods
        - nodes
        - nodes/stats
        - namespaces
        - configmaps
        verbs:
          - get
          - list
          - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: system:metrics-server
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:metrics-server
    subjects:
      - kind: ServiceAccount
        name: metrics-server
        namespace: kube-system
    

    2.1. Apply the manifest above to create the resources

    kubectl apply -f components-metrics-server.yaml
    kubectl get pods -n kube-system
    
    [root@kubernetes-master-01 ~]# kubectl apply -f components-metrics-server.yaml 
    clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
    clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
    rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
    apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
    serviceaccount/metrics-server created
    deployment.apps/metrics-server created
    service/metrics-server created
    clusterrole.rbac.authorization.k8s.io/system:metrics-server created
    clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
    [root@kubernetes-master-01 ~]# kubectl get pods -n kube-system 
    NAME                              READY   STATUS    RESTARTS   AGE
    coredns-6f5774d776-92h9j          1/1     Running   7          37d
    metrics-server-787d9dbb5d-6fr6b   1/1     Running   0          17s
    [root@kubernetes-master-01 ~]# kubectl top nodes
    NAME                   CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
    kubernetes-master-01   914m         91%    1180Mi          62%       
    kubernetes-master-03   86m          8%     1321Mi          70%       
    kubernetes-master02    325m         32%    1376Mi          73%       
    kubernetes-node-01     17m          1%     676Mi           35%       
    kubernetes-node-02     16m          1%     475Mi           25%  
    
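    Pod-level metrics should work as well once the component is up (output varies per cluster):

    kubectl top pods -n kube-system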


    If anything goes wrong, check the registered API services:

    kubectl get apiservices
    
    [root@kubernetes-master-01 ~]# kubectl get apiservices
    NAME                                   SERVICE                      AVAILABLE   AGE
    v1.                                    Local                        True        46d
    v1.admissionregistration.k8s.io        Local                        True        46d
    v1.apiextensions.k8s.io                Local                        True        46d
    v1.apps                                Local                        True        46d
    v1.authentication.k8s.io               Local                        True        46d
    v1.authorization.k8s.io                Local                        True        46d
    v1.autoscaling                         Local                        True        46d
    v1.batch                               Local                        True        46d
    v1.coordination.k8s.io                 Local                        True        46d
    v1.networking.k8s.io                   Local                        True        46d
    v1.rbac.authorization.k8s.io           Local                        True        46d
    v1.scheduling.k8s.io                   Local                        True        46d
    v1.storage.k8s.io                      Local                        True        46d
    v1beta1.admissionregistration.k8s.io   Local                        True        46d
    v1beta1.apiextensions.k8s.io           Local                        True        46d
    v1beta1.authentication.k8s.io          Local                        True        46d
    v1beta1.authorization.k8s.io           Local                        True        46d
    v1beta1.batch                          Local                        True        46d
    v1beta1.certificates.k8s.io            Local                        True        46d
    v1beta1.coordination.k8s.io            Local                        True        46d
    v1beta1.discovery.k8s.io               Local                        True        46d
    v1beta1.events.k8s.io                  Local                        True        46d
    v1beta1.extensions                     Local                        True        46d
    v1beta1.metrics.k8s.io                 kube-system/metrics-server   True        12m
    v1beta1.networking.k8s.io              Local                        True        46d
    v1beta1.node.k8s.io                    Local                        True        46d
    v1beta1.policy                         Local                        True        46d
    v1beta1.rbac.authorization.k8s.io      Local                        True        46d
    v1beta1.scheduling.k8s.io              Local                        True        46d
    v1beta1.storage.k8s.io                 Local                        True        46d
    v2beta1.autoscaling                    Local                        True        46d
    v2beta2.autoscaling                    Local                        True        46d
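
    If v1beta1.metrics.k8s.io ever shows AVAILABLE as False, describe the APIService and read its Message field, then check the metrics-server logs (both are standard kubectl calls):

    kubectl describe apiservice v1beta1.metrics.k8s.io
    kubectl logs -n kube-system deploy/metrics-server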
    


    三、 HPA Autoscaling

    In production, unexpected things happen. For example, site traffic may suddenly spike so that the Pods created earlier can no longer handle all of the requests, and the operations team obviously cannot watch the service 24 hours a day. With an HPA (HorizontalPodAutoscaler) configured, the replica count is scaled out automatically under high load to absorb the traffic, and scaled back in once traffic returns to normal. The HPA scales the number of Pods based on CPU and memory utilization, so to use an HPA the Requests parameters must be defined.

    1.1、 Create the HPA

    # HPA autoscaling demo
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: nginx-hpa
      namespace: default
      labels:
        app: nginx
    spec:
      selector:
        matchLabels:
          app: nginx
          test: hpa
      template:
        metadata:
          labels:
            app: nginx
            test: hpa
        spec:
          containers:
            - name: nginx
              image: nginx
              ports:
                - containerPort: 80
              resources:
                limits:
                  cpu: 10m
                  memory: 50Mi
                requests:
                  cpu: 10m
                  memory: 50Mi
    ---
    kind: Service
    apiVersion: v1
    metadata:
      namespace: default
      name: hpa-svc
    spec:
      selector:
        app: nginx
        test: hpa
      ports:
        - port: 80
          targetPort: 80
    ---
    kind: HorizontalPodAutoscaler
    apiVersion: autoscaling/v2beta1
    metadata:
      name: docs
      namespace: default
    spec:
      # maximum and minimum number of Pods the HPA may scale between
      maxReplicas: 10
      minReplicas: 1
      # the object the HPA scales; the HPA dynamically adjusts its replica count
      scaleTargetRef:
        kind: Deployment
        name: nginx-hpa
        apiVersion: apps/v1
      # metrics to watch; several metric types may coexist
      metrics:
        - type: Resource
          # core metrics: cpu and memory (taken from the requests/limits defined on the scaled Pods' containers)
          resource:
            name: cpu
            # CPU threshold
            # computed as the average utilization (a percentage) across all target Pods:
            # e.g. limit.cpu=1000m and actual usage 500m gives utilization = 50%
            # e.g. deployment.replicas=3, limit.cpu=1000m, pod1 uses 500m, pod2 300m, pod3 600m:
            # averageUtilization = (500/1000 + 300/1000 + 600/1000) / 3 = (500+300+600)/(3*1000) ≈ 46.7%
            targetAverageUtilization: 5
    
    

    Apply:

    [root@kubernetes-master-01 ~]# vi hpa-metrics-server.yaml 
    [root@kubernetes-master-01 ~]# kubectl apply -f hpa-metrics-server.yaml 
    deployment.apps/nginx-hpa created
    service/hpa-svc created
    horizontalpodautoscaler.autoscaling/docs created
    [root@kubernetes-master-01 ~]# kubectl get pods -l test=hpa
    NAME                       READY   STATUS    RESTARTS   AGE
    nginx-hpa-569cc66c-hcc2k   1/1     Running   0          47s
    [root@kubernetes-master-01 ~]# kubectl get svc
    NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
    hpa-svc      ClusterIP   10.96.208.122   <none>        80/TCP         2m15s
    kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        49d
    nginx        NodePort    10.96.106.13    <none>        80:26755/TCP   40d
    
    


    Put load on hpa-svc:

    wget -O- -q http://10.96.208.122
    
    kubectl run test -it --rm --image=busybox:1.28.3
    while true; do wget -O- -q http://10.96.208.122; done
    


    1.2、 Test
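
    With the busybox request loop from the previous step still running, the scaling can be watched live; a minimal sketch (the HPA is named docs in the manifest above):

    # watch the HPA raise the replica count as CPU climbs, then shrink it back
    kubectl get hpa docs -w
    # in another terminal, watch Pods come and go
    kubectl get pods -l test=hpa -w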

    四、 Nginx Ingress

    Ingress provides the entry point into the services of a Kubernetes cluster: load balancing, SSL termination, and name-based virtual hosting. Ingress controllers commonly used in production include Traefik, Nginx, HAProxy, and Istio. Ingress, added in Kubernetes v1.1, routes HTTP and HTTPS traffic from outside the cluster to Services inside it: traffic flows from the Internet to the Ingress, then to Services, and finally to Pods. Typically the Ingress controller is deployed on all Node machines. An Ingress can be configured with externally reachable URLs, load balancing, and SSL termination, and provides name-based virtual hosts, but it does not expose arbitrary ports or protocols.

    1.1、 Install nginx ingress

    # official repo and docs
    https://github.com/kubernetes/ingress-nginx
    https://kubernetes.github.io/ingress-nginx/
    # deployment manifest
    https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.44.0/deploy/static/provider/baremetal/deploy.yaml


    # ingress-nginxdeploy.yaml
    
    apiVersion: v1
    kind: Namespace
    metadata:
      name: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
    
    ---
    # Source: ingress-nginx/templates/controller-serviceaccount.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx
      namespace: ingress-nginx
    ---
    # Source: ingress-nginx/templates/controller-configmap.yaml
    apiVersion: v1
    kind: ConfigMap
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx-controller
      namespace: ingress-nginx
    data:
    ---
    # Source: ingress-nginx/templates/clusterrole.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
      name: ingress-nginx
    rules:
      - apiGroups:
          - ''
        resources:
          - configmaps
          - endpoints
          - nodes
          - pods
          - secrets
        verbs:
          - list
          - watch
      - apiGroups:
          - ''
        resources:
          - nodes
        verbs:
          - get
      - apiGroups:
          - ''
        resources:
          - services
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - extensions
          - networking.k8s.io   # k8s 1.14+
        resources:
          - ingresses
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - ''
        resources:
          - events
        verbs:
          - create
          - patch
      - apiGroups:
          - extensions
          - networking.k8s.io   # k8s 1.14+
        resources:
          - ingresses/status
        verbs:
          - update
      - apiGroups:
          - networking.k8s.io   # k8s 1.14+
        resources:
          - ingressclasses
        verbs:
          - get
          - list
          - watch
    ---
    # Source: ingress-nginx/templates/clusterrolebinding.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
      name: ingress-nginx
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: ingress-nginx
    subjects:
      - kind: ServiceAccount
        name: ingress-nginx
        namespace: ingress-nginx
    ---
    # Source: ingress-nginx/templates/controller-role.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx
      namespace: ingress-nginx
    rules:
      - apiGroups:
          - ''
        resources:
          - namespaces
        verbs:
          - get
      - apiGroups:
          - ''
        resources:
          - configmaps
          - pods
          - secrets
          - endpoints
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - ''
        resources:
          - services
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - extensions
          - networking.k8s.io   # k8s 1.14+
        resources:
          - ingresses
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - extensions
          - networking.k8s.io   # k8s 1.14+
        resources:
          - ingresses/status
        verbs:
          - update
      - apiGroups:
          - networking.k8s.io   # k8s 1.14+
        resources:
          - ingressclasses
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - ''
        resources:
          - configmaps
        resourceNames:
          - ingress-controller-leader-nginx
        verbs:
          - get
          - update
      - apiGroups:
          - ''
        resources:
          - configmaps
        verbs:
          - create
      - apiGroups:
          - ''
        resources:
          - events
        verbs:
          - create
          - patch
    ---
    # Source: ingress-nginx/templates/controller-rolebinding.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx
      namespace: ingress-nginx
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: ingress-nginx
    subjects:
      - kind: ServiceAccount
        name: ingress-nginx
        namespace: ingress-nginx
    ---
    # Source: ingress-nginx/templates/controller-service-webhook.yaml
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
    spec:
      type: ClusterIP
      ports:
        - name: https-webhook
          port: 443
          targetPort: webhook
      selector:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    ---
    # Source: ingress-nginx/templates/controller-service.yaml
    apiVersion: v1
    kind: Service
    metadata:
      annotations:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx-controller
      namespace: ingress-nginx
    spec:
      type: NodePort
      ports:
        - name: http
          port: 80
          protocol: TCP
          targetPort: http
        - name: https
          port: 443
          protocol: TCP
          targetPort: https
      selector:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    ---
    # Source: ingress-nginx/templates/controller-deployment.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
      name: ingress-nginx-controller
      namespace: ingress-nginx
    spec:
      selector:
        matchLabels:
          app.kubernetes.io/name: ingress-nginx
          app.kubernetes.io/instance: ingress-nginx
          app.kubernetes.io/component: controller
      revisionHistoryLimit: 10
      minReadySeconds: 0
      template:
        metadata:
          labels:
            app.kubernetes.io/name: ingress-nginx
            app.kubernetes.io/instance: ingress-nginx
            app.kubernetes.io/component: controller
        spec:
          dnsPolicy: ClusterFirst
          containers:
            - name: controller
              image: k8s.gcr.io/ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a
              imagePullPolicy: IfNotPresent
              lifecycle:
                preStop:
                  exec:
                    command:
                      - /wait-shutdown
              args:
                - /nginx-ingress-controller
                - --election-id=ingress-controller-leader
                - --ingress-class=nginx
                - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
                - --validating-webhook=:8443
                - --validating-webhook-certificate=/usr/local/certificates/cert
                - --validating-webhook-key=/usr/local/certificates/key
              securityContext:
                capabilities:
                  drop:
                    - ALL
                  add:
                    - NET_BIND_SERVICE
                runAsUser: 101
                allowPrivilegeEscalation: true
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: LD_PRELOAD
                  value: /usr/local/lib/libmimalloc.so
              livenessProbe:
                httpGet:
                  path: /healthz
                  port: 10254
                  scheme: HTTP
                initialDelaySeconds: 10
                periodSeconds: 10
                timeoutSeconds: 1
                successThreshold: 1
                failureThreshold: 5
              readinessProbe:
                httpGet:
                  path: /healthz
                  port: 10254
                  scheme: HTTP
                initialDelaySeconds: 10
                periodSeconds: 10
                timeoutSeconds: 1
                successThreshold: 1
                failureThreshold: 3
              ports:
                - name: http
                  containerPort: 80
                  protocol: TCP
                - name: https
                  containerPort: 443
                  protocol: TCP
                - name: webhook
                  containerPort: 8443
                  protocol: TCP
              volumeMounts:
                - name: webhook-cert
                  mountPath: /usr/local/certificates/
                  readOnly: true
              resources:
                requests:
                  cpu: 100m
                  memory: 90Mi
          nodeSelector:
            kubernetes.io/os: linux
          serviceAccountName: ingress-nginx
          terminationGracePeriodSeconds: 300
          volumes:
            - name: webhook-cert
              secret:
                secretName: ingress-nginx-admission
    ---
    # Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
    # before changing this value, check the required kubernetes version
    # https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
    apiVersion: admissionregistration.k8s.io/v1
    kind: ValidatingWebhookConfiguration
    metadata:
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
      name: ingress-nginx-admission
    webhooks:
      - name: validate.nginx.ingress.kubernetes.io
        matchPolicy: Equivalent
        rules:
          - apiGroups:
              - networking.k8s.io
            apiVersions:
              - v1beta1
            operations:
              - CREATE
              - UPDATE
            resources:
              - ingresses
        failurePolicy: Fail
        sideEffects: None
        admissionReviewVersions:
          - v1
          - v1beta1
        clientConfig:
          service:
            namespace: ingress-nginx
            name: ingress-nginx-controller-admission
            path: /networking/v1beta1/ingresses
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: ingress-nginx-admission
      annotations:
        helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
      namespace: ingress-nginx
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: ingress-nginx-admission
      annotations:
        helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    rules:
      - apiGroups:
          - admissionregistration.k8s.io
        resources:
          - validatingwebhookconfigurations
        verbs:
          - get
          - update
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: ingress-nginx-admission
      annotations:
        helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: ingress-nginx-admission
    subjects:
      - kind: ServiceAccount
        name: ingress-nginx-admission
        namespace: ingress-nginx
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: ingress-nginx-admission
      annotations:
        helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
      namespace: ingress-nginx
    rules:
      - apiGroups:
          - ''
        resources:
          - secrets
        verbs:
          - get
          - create
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: ingress-nginx-admission
      annotations:
        helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
      namespace: ingress-nginx
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: ingress-nginx-admission
    subjects:
      - kind: ServiceAccount
        name: ingress-nginx-admission
        namespace: ingress-nginx
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
    apiVersion: batch/v1
    kind: Job
    metadata:
      name: ingress-nginx-admission-create
      annotations:
        helm.sh/hook: pre-install,pre-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
      namespace: ingress-nginx
    spec:
      template:
        metadata:
          name: ingress-nginx-admission-create
          labels:
            helm.sh/chart: ingress-nginx-3.23.0
            app.kubernetes.io/name: ingress-nginx
            app.kubernetes.io/instance: ingress-nginx
            app.kubernetes.io/version: 0.44.0
            app.kubernetes.io/managed-by: Helm
            app.kubernetes.io/component: admission-webhook
        spec:
          containers:
            - name: create
              image: docker.io/jettech/kube-webhook-certgen:v1.5.1
              imagePullPolicy: IfNotPresent
              args:
                - create
                - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
                - --namespace=$(POD_NAMESPACE)
                - --secret-name=ingress-nginx-admission
              env:
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
          restartPolicy: OnFailure
          serviceAccountName: ingress-nginx-admission
          securityContext:
            runAsNonRoot: true
            runAsUser: 2000
    ---
    # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
    apiVersion: batch/v1
    kind: Job
    metadata:
      name: ingress-nginx-admission-patch
      annotations:
        helm.sh/hook: post-install,post-upgrade
        helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
      labels:
        helm.sh/chart: ingress-nginx-3.23.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 0.44.0
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
      namespace: ingress-nginx
    spec:
      template:
        metadata:
          name: ingress-nginx-admission-patch
          labels:
            helm.sh/chart: ingress-nginx-3.23.0
            app.kubernetes.io/name: ingress-nginx
            app.kubernetes.io/instance: ingress-nginx
            app.kubernetes.io/version: 0.44.0
            app.kubernetes.io/managed-by: Helm
            app.kubernetes.io/component: admission-webhook
        spec:
          containers:
            - name: patch
              image: docker.io/jettech/kube-webhook-certgen:v1.5.1
              imagePullPolicy: IfNotPresent
              args:
                - patch
                - --webhook-name=ingress-nginx-admission
                - --namespace=$(POD_NAMESPACE)
                - --patch-mutating=false
                - --secret-name=ingress-nginx-admission
                - --patch-failure-policy=Fail
              env:
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
          restartPolicy: OnFailure
          serviceAccountName: ingress-nginx-admission
          securityContext:
            runAsNonRoot: true
            runAsUser: 2000
    
    # check which images are required
    [root@kubernetes-master-01 ~]# cat ingress-nginxdeploy.yaml  | grep image
              image: k8s.gcr.io/ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a
              imagePullPolicy: IfNotPresent
              image: docker.io/jettech/kube-webhook-certgen:v1.5.1
              imagePullPolicy: IfNotPresent
              image: docker.io/jettech/kube-webhook-certgen:v1.5.1
              imagePullPolicy: IfNotPresent
    # mirror the images through a personal registry (Aliyun)
    https://code.aliyun.com/RandySun121/k8s/blob/master/ingress-controller/Dockerfile
    https://cr.console.aliyun.com/repository/cn-hangzhou/k8s121/ingress-nginx/build
        
    # create the resources
    [root@kubernetes-master-01 ~]# kubectl apply -f ingress-nginxdeploy.yaml 
    namespace/ingress-nginx created
    serviceaccount/ingress-nginx created
    configmap/ingress-nginx-controller created
    clusterrole.rbac.authorization.k8s.io/ingress-nginx created
    clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
    role.rbac.authorization.k8s.io/ingress-nginx created
    rolebinding.rbac.authorization.k8s.io/ingress-nginx created
    service/ingress-nginx-controller-admission created
    service/ingress-nginx-controller created
    deployment.apps/ingress-nginx-controller created
    validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
    serviceaccount/ingress-nginx-admission created
    clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
    clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
    role.rbac.authorization.k8s.io/ingress-nginx-admission created
    rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
    job.batch/ingress-nginx-admission-create created
    job.batch/ingress-nginx-admission-patch created
    
    # verify the installation
    [root@kubernetes-master-01 ~]# kubectl get pods -n ingress-nginx
    NAME                                       READY   STATUS      RESTARTS   AGE
    ingress-nginx-admission-create-b449r       0/1     Completed   0          3m34s
    ingress-nginx-admission-patch-mqbcr        0/1     Completed   0          3m34s
    ingress-nginx-controller-88996f96c-zmql5   1/1     Running     0          3m35s
    
    


    Test

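    Before creating any Ingress rule, the controller itself can be sanity-checked: requesting the HTTP NodePort on any node should come back with the controller's default 404 response, since no rule matches yet. A hedged sketch, using the node address and port 4915 that appear in the Service output further below (yours will differ):

    curl http://172.16.0.52:4915/
    # expect an HTTP 404 served by nginx: no Ingress rule matches yet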

    [root@kubernetes-master-01 ~]# kubectl explain Ingress
    KIND:     Ingress
    VERSION:  extensions/v1beta1
    
    
    
    kind: Ingress
    # kubectl explain Ingress
    apiVersion:  extensions/v1beta1
    metadata:
      namespace: default
      name: test-ingress
    
    spec:
      rules:
        - host: www.test.com
          http:
            paths:
              - backend:
                  serviceName: hpa-svc
                  servicePort: 80
                path: /
                    
                    
    [root@kubernetes-master-01 ~]# vi test-ingress.yaml
    # deploy
    [root@kubernetes-master-01 ~]# kubectl apply -f test-ingress.yaml 
    ingress.extensions/test-ingress created
    # check the result
    [root@kubernetes-master-01 ~]# kubectl get ingress
    NAME           CLASS    HOSTS              ADDRESS   PORTS   AGE
    test-ingress   <none>   www.randysun.com             80      9s
    [root@kubernetes-master-01 ~]# kubectl get ingress
    NAME           CLASS    HOSTS              ADDRESS   PORTS   AGE
    test-ingress   <none>   www.randysun.com             80      32s
    [root@kubernetes-master-01 ~]# kubectl get ingress
    NAME           CLASS    HOSTS              ADDRESS       PORTS   AGE
    test-ingress   <none>   www.randysun.com   172.16.0.52   80      89s
    # find the access IP and ports
    [root@kubernetes-master-01 ~]# kubectl get svc -n ingress-nginx
    NAME                                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                     AGE
    ingress-nginx-controller             NodePort    10.96.175.228   <none>        80:4915/TCP,443:20134/TCP   81m
    ingress-nginx-controller-admission   ClusterIP   10.96.86.189    <none>        443/TCP                     81m
    
    
    


    1.2、 TLS-based Ingress

    1.2.1、Create a certificate

    Create a self-signed certificate; in production you would use a certificate purchased by the company.

    openssl genrsa -out tls.key 2048
    openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=ShangHai/L=ShangHai/O=Ingress/CN=www.test.com
    
    
    [root@kubernetes-master-01 ~]# mkdir cert
    [root@kubernetes-master-01 ~]# cd cert/
    [root@kubernetes-master-01 cert]# openssl genrsa -out tls.key 2048
    Generating RSA private key, 2048 bit long modulus
    .....................................+++
    ...+++
    e is 65537 (0x10001)
    [root@kubernetes-master-01 cert]# ls
    tls.key
    [root@kubernetes-master-01 cert]# openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=ShangHai/L=ShangHai/O=Ingress/CN=www.test.com
    [root@kubernetes-master-01 cert]# ll
    total 8
    -rw-r--r--. 1 root root 1289 Mar 21 14:26 tls.crt
    -rw-r--r--. 1 root root 1679 Mar 21 14:26 tls.key
    
    [root@kubernetes-master-01 cert]# kubectl -n default create secret tls ingress-tls --cert=tls.crt --key=tls.key
    secret/ingress-tls created
    [root@kubernetes-master-01 cert]# kubectl get secrets
    NAME                  TYPE                                  DATA   AGE
    default-token-xt6cs   kubernetes.io/service-account-token   3      44d
    ingress-tls           kubernetes.io/tls                     2      28s
    


    1.2.2、Define the Ingress

    ---
    kind: Ingress
    apiVersion:  extensions/v1beta1
    metadata:
      name: ingress-ingress
      namespace: default
      annotations:
        kubernetes.io/ingress.class: "nginx"
    spec:
      tls:
        - secretName: ingress-tls # the secret name, as shown by kubectl get secrets
      rules:
        - host: www.test.com
          http:
            paths:
              - backend:
                  serviceName: hpa-svc
                  servicePort: 80
    


    1.2.2.1、 Deploy

    [root@kubernetes-master-01 ~]# kubectl apply -f ingress-nginx-tls.yaml 
    ingress.extensions/ingress-ingress created
    [root@kubernetes-master-01 ~]# kubectl get svc -n ingress-nginx 
    NAME                                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                     AGE
    ingress-nginx-controller             NodePort    10.96.175.228   <none>        80:4915/TCP,443:20134/TCP   147m
    ingress-nginx-controller-admission   ClusterIP   10.96.86.189    <none>        443/TCP                     147m
    [root@kubernetes-master-01 ~]# kubectl get ingress
    NAME              CLASS    HOSTS          ADDRESS       PORTS     AGE
    ingress-ingress   <none>   www.test.com   172.16.0.52   80, 443   45s
    
    

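    To verify the TLS Ingress end to end, point www.test.com at the Ingress address (172.16.0.52 above) and hit the HTTPS NodePort (20134 in the Service output); -k is needed because the certificate is self-signed. A hedged sketch:

    echo "172.16.0.52 www.test.com" >> /etc/hosts
    curl -k https://www.test.com:20134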

    五、 Data Persistence

    As we know, Pods are made up of containers, and when a container crashes or stops, its data is lost. This means that when building a Kubernetes cluster we have to think about storage, and volumes exist precisely so that Pods can keep their data. There are many volume types; the four we commonly use are emptyDir, hostPath, NFS, and cloud storage (Ceph, GlusterFS, ...).

    1、 emptyDir

    An emptyDir volume is created when the Pod is assigned to a node; Kubernetes automatically allocates a directory on that node, so there is no need to specify a host directory. The directory starts out empty, and when the Pod is removed from the node, the data in the emptyDir is deleted permanently. emptyDir volumes are mainly used as temporary scratch space that the application does not need to keep.

    # emptyDir demo
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      namespace: default
      name: empty-dir
    spec:
      selector:
        matchLabels:
          app: empty-dir
      template:
        metadata:
          labels:
            app: empty-dir
        spec:
          containers:
            # first container
            - name: busybox
              image: busybox:1.28.3
              imagePullPolicy: IfNotPresent
              command: ['/bin/sh', '-c', "while true; do echo 'empty-dir busybox' >> /data/index; sleep 1; done"]
              # mount the volume
              volumeMounts:
                - mountPath: /data # path inside the container
                  name: empty-dir-data # name of the volume
            # second container
            - name: busybox-tow
              image: busybox
              imagePullPolicy: IfNotPresent
              command: ['/bin/sh', '-c', "while true; do echo 'empty-dir busybox-tow' >> /data/index; sleep 1; done"]
              # mount the same volume
              volumeMounts:
                - mountPath: /data
                  name: empty-dir-data
          # define the volume (an emptyDir shared by both containers)
          volumes:
            - name: empty-dir-data
              emptyDir: {}
    
    
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=empty-dir
    NAME                         READY   STATUS             RESTARTS   AGE
    empty-dir-565bf99586-shm9t   0/2     CrashLoopBackOff   8          107s
    [root@kubernetes-master-01 ~]# kubectl delete  -f empty-dir-data.yaml 
    deployment.apps "empty-dir" deleted
    # recreate the Pods
    [root@kubernetes-master-01 ~]# vi empty-dir-data.yaml 
    [root@kubernetes-master-01 ~]# kubectl apply -f empty-dir-data.yaml 
    deployment.apps/empty-dir created
    
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=empty-dir
    NAME                         READY   STATUS    RESTARTS   AGE
    empty-dir-796cf6fc58-h2rpf   2/2     Running   0          4s
    # different containers reading and writing the same volume
    [root@kubernetes-master-01 ~]# kubectl exec -it empty-dir-796cf6fc58-h2rpf -c busybox -- sh
    / # tail -f /data/index 
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    empty-dir busybox
    empty-dir busybox-tow
    ^C
    
    


    2、 hostPath

    A hostPath volume maps a file or directory from the node's filesystem into the Pod. When using a hostPath volume you can also set the type field; supported types include Directory, DirectoryOrCreate, File, FileOrCreate, Socket, CharDevice, and BlockDevice.

    kubectl explain pod.spec.volumes.hostPath.type
    https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
        
    kind: Deployment  
    apiVersion: apps/v1
    metadata:
      name: hostpath
    spec:
      selector:
        matchLabels:
          app: hostpath
      template:
        metadata:
          labels:
            app: hostpath
        spec:
          containers:
            - name: hostpath
              image: nginx:1.19.2
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - mountPath: /usr/share/nginx/html # mount point inside the container
                  name: hostpath
    
          volumes:
            - name: hostpath # host-side volume definition
              hostPath:
                path: /opt
                type: DirectoryOrCreate
                    
                    
    


    [root@kubernetes-master-01 ~]# vim hostpath.yaml
    [root@kubernetes-master-01 ~]# kubectl apply -f hostpath.yaml 
    deployment.apps/hostpath created
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=hostpath
    NAME                        READY   STATUS    RESTARTS   AGE
    hostpath-79f9457bc6-vcb6f   1/1     Running   0          13s
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=hostpaht -o wide
    No resources found in default namespace.
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=hostpath -o wide
    NAME                        READY   STATUS    RESTARTS   AGE   IP             NODE                   NOMINATED NODE   READINESS GATES
    hostpath-79f9457bc6-vcb6f   1/1     Running   0          77s   10.241.136.3   kubernetes-master-01   <none>           <none>
    [root@kubernetes-master-01 ~]# curl 10.241.136.3
    <html>
    <head><title>403 Forbidden</title></head>
    <body>
    <center><h1>403 Forbidden</h1></center>
    <hr><center>nginx/1.19.2</center>
    </body>
    </html>
    [root@kubernetes-master-01 ~]# cd /opt/
    [root@kubernetes-master-01 opt]# echo "String" >> index.html
    [root@kubernetes-master-01 opt]# curl 10.241.136.3
    String
    [root@kubernetes-master-01 opt]# pwd
    /opt
    [root@kubernetes-master-01 opt]# 
    
    


    3、 PV and PVC

    A PersistentVolume (PV) is a piece of networked storage in the cluster that has been provisioned by an administrator. It is a cluster resource, just like a node. PVs are volume plugins like ordinary volumes, but have a lifecycle independent of any individual Pod that uses them. The API object captures the details of the storage implementation: NFS, iSCSI, or a cloud provider's storage system.

    A PersistentVolumeClaim (PVC) is a user's request for storage. The usage logic: define a volume of type PVC in the Pod and specify its size directly; the PVC must bind to a matching PV, claiming from the available PVs according to its definition, while the PVs themselves are created from actual storage. PV and PVC are the storage abstraction that Kubernetes provides.

    3.1、 NFS

    NFS lets us mount an existing share into our Pods. Unlike emptyDir, which is removed when its Pod is deleted, an NFS share is not removed: it is merely unmounted. This means NFS allows us to prepare data in advance, that data can be passed between Pods, and an NFS share can be mounted and written by multiple Pods at the same time.

    3.1.1、Install NFS on all nodes

    # install on every node
    yum install nfs-utils.x86_64 -y
    


    3.1.2、Configure NFS

    # configure only on the master node; note the IP subnet
    [root@kubernetes-master-01 ~]# mkdir -p /nfs/v{1..5}
    [root@kubernetes-master-01 ~]# ls /nfs/
    v1  v2  v3  v4  v5
    [root@kubernetes-master-01 ~]# ls /nfs/v
    v1/ v2/ v3/ v4/ v5/ 
    [root@kubernetes-master-01 ~]# ls /nfs/v1/
    [root@kubernetes-master-01 ~]# chmod -R 777 /nfs/
    [root@kubernetes-master-01 ~]# cat /etc/exports
    # the subnet must cover the current hosts' network range
    [root@kubernetes-master-01 ~]# cat > /etc/exports <<EOF
    > /nfs/v1 172.16.0.0/16(rw,no_root_squash)
    > /nfs/v2 172.16.0.0/16(rw,no_root_squash)
    > /nfs/v3 172.16.0.0/16(rw,no_root_squash)
    > /nfs/v4 172.16.0.0/16(rw,no_root_squash)
    > /nfs/v5 172.16.0.0/16(rw,no_root_squash)
    > EOF
    [root@kubernetes-master-01 ~]# systemctl start nfs.service 
    [root@kubernetes-master-01 ~]# showmount -e
    Export list for kubernetes-master-01:
    /nfs/v5 172.16.0.0/16
    /nfs/v4 172.16.0.0/16
    /nfs/v3 172.16.0.0/16
    /nfs/v2 172.16.0.0/16
    /nfs/v1 172.16.0.0/16
    [root@kubernetes-master-01 ~]# systemctl status nfs.service 
    ● nfs-server.service - NFS server and services
       Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; vendor preset: disabled)
       Active: active (exited) since Sun 2021-03-21 16:19:34 CST; 48s ago
      Process: 44751 ExecStartPost=/bin/sh -c if systemctl -q is-active gssproxy; then systemctl reload gssproxy ; fi (code=exited, status=0/SUCCESS)
      Process: 44735 ExecStart=/usr/sbin/rpc.nfsd $RPCNFSDARGS (code=exited, status=0/SUCCESS)
      Process: 44732 ExecStartPre=/usr/sbin/exportfs -r (code=exited, status=0/SUCCESS)
     Main PID: 44735 (code=exited, status=0/SUCCESS)
        Tasks: 0
       Memory: 0B
       CGroup: /system.slice/nfs-server.service
    
    Mar 21 16:19:34 kubernetes-master-01 systemd[1]: Starting NFS server and services...
    Mar 21 16:19:34 kubernetes-master-01 systemd[1]: Started NFS server and services.
    


    3.1.3、Create a Pod that uses NFS

    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: nfs-server
    spec:
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
            - name: nginx
              image: nginx
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - mountPath: /usr/share/nginx/html
                  name: nfs
          volumes:
            - name: nfs
              nfs:
                path: /nfs/v1
                server: 172.16.0.50
    

    Apply:

    kubectl apply -f nfs-server.yaml
    
    
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=nginx
    NAME                          READY   STATUS    RESTARTS   AGE
    nfs-server-794bc894b8-dhb5x   1/1     Running   0          34s
    
    
    [root@kubernetes-master-01 ~]# kubectl get pods -o wide -w 
    NAME                          READY   STATUS    RESTARTS   AGE     IP             NODE                   NOMINATED NODE   READINESS GATES
    
    nfs-server-794bc894b8-dhb5x   1/1     Running   0          3m      10.241.168.3   kubernetes-node-01     <none>           <none>
    
    # access nginx
    [root@kubernetes-master-01 ~]# curl 10.241.168.3
    <html>
    <head><title>403 Forbidden</title></head>
    <body>
    <center><h1>403 Forbidden</h1></center>
    <hr><center>nginx/1.19.6</center>
    </body>
    </html>
    [root@kubernetes-master-01 ~]# cd /nfs/v1/
    [root@kubernetes-master-01 v1]# ls
    [root@kubernetes-master-01 v1]# echo "Nfs" > index.html
    [root@kubernetes-master-01 v1]# curl 10.241.168.3
    Nfs
    


    3.2、 PV access modes (accessModes)

    - ReadWriteOnce (RWO): read-write, but mountable by a single node only.
    - ReadOnlyMany (ROX): read-only, mountable by many nodes.
    - ReadWriteMany (RWX): read-write, shareable by many nodes at once. Not every storage backend supports all three modes; shared read-write support is still relatively rare, NFS being the most common choice. When a PVC binds to a PV, the match is usually made on two criteria: storage size and access mode.

    3.3、 PV reclaim policies (persistentVolumeReclaimPolicy)

    - Retain: do not clean up; keep the volume (manual cleanup required).
    - Recycle: delete the data, i.e. rm -rf /thevolume/* (only NFS and HostPath support this).
    - Delete: delete the backing storage resource, e.g. an AWS EBS volume (only AWS EBS, GCE PD, Azure Disk, and Cinder volumes support this).
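
    The policy is set per PV via spec.persistentVolumeReclaimPolicy (see the manifests in 3.5 below); it can also be changed on a live PV. A small sketch:

    # switch pv001 from Retain to Recycle
    kubectl patch pv pv001 -p '{"spec":{"persistentVolumeReclaimPolicy":"Recycle"}}'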

    3.4、 PV states

    - Available: free, not yet bound to any PVC.
    - Bound: bound to a PVC.
    - Released: its PVC was deleted, but the reclaim policy has not run yet.
    - Failed: an error occurred (automatic reclamation failed).

    3.5、 Create PVs

    # create PV resources (think of them as provisioning disks)
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv001
      labels:
        app: pv001
    spec:
      nfs:
        path: /nfs/v2
        server: 172.16.0.50
      accessModes: # access modes
        - "ReadWriteMany" # read-write, many nodes
        - "ReadWriteOnce" # read-write, single node
      capacity:
        storage: 2Gi
    ---
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv002
      labels:
        app: pv002
    spec:
      nfs:
        path: /nfs/v3
        server: 172.16.0.50
      accessModes:
        - "ReadWriteMany"
        - "ReadWriteOnce"
      capacity:
        storage: 5Gi
      persistentVolumeReclaimPolicy: Delete # PV reclaim policy
    ---
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv003
      labels:
        app: pv003
    spec:
      nfs:
        path: /nfs/v4
        server: 172.16.0.50
      accessModes:
        - "ReadWriteMany"
        - "ReadWriteOnce"
      capacity:
        storage: 10Gi
      persistentVolumeReclaimPolicy: Recycle # PV reclaim policy
    ---
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      name: pv004
      labels:
        app: pv004
    spec:
      nfs:
        path: /nfs/v5
        server: 172.16.0.50
      accessModes:
        - "ReadWriteMany"
        - "ReadWriteOnce"
      capacity:
        storage: 20Gi
      persistentVolumeReclaimPolicy: Retain # PV reclaim policy
    
    

    Apply:

    [root@kubernetes-master-01 ~]#  kubectl apply  -f pv.yaml 
    persistentvolume/pv001 created
    persistentvolume/pv002 created
    persistentvolume/pv003 created
    persistentvolume/pv004 created
    [root@kubernetes-master-01 ~]# kubectl get pv
    NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
    pv001   2Gi        RWO,RWX        Retain           Available                                   30s
    pv002   5Gi        RWO,RWX        Delete           Available                                   30s
    pv003   10Gi       RWO,RWX        Recycle          Available                                   30s
    pv004   20Gi       RWO,RWX        Retain           Available                                   30s
    


    3.5.1、View the PVs

    [root@kubernetes-master-01 ~]# kubectl get pv
    NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
    pv001   2Gi        RWO,RWX        Retain           Available                                   30s
    pv002   5Gi        RWO,RWX        Delete           Available                                   30s
    pv003   10Gi       RWO,RWX        Recycle          Available                                   30s
    pv004   20Gi       RWO,RWX        Retain           Available                                   30s
    


    3.6、 Use the PVs: create a PVC

    # create a PVC that consumes a PV
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: pvc # must match the claimName used in the Deployment below
      namespace: default
    spec:
      accessModes:
        - "ReadWriteMany"
      resources:
        requests:
          storage: "6Gi" # 最少资源六个G,会选择最贴近的pv
    
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: nfs-pv-pvc
      namespace: default
    spec:
      selector:
        matchLabels:
          app: nfs
      template:
        metadata:
          labels:
            app: nfs
        spec:
          containers:
            - name: nginx
              image: nginx
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - mountPath: /usr/share/nginx/html/
                  name: html
          volumes:
            - name: html
              persistentVolumeClaim:
                claimName: pvc
      replicas: 2
    

    Apply:

    [root@kubernetes-master-01 ~]# kubectl apply -f pvc.yaml 
    persistentvolumeclaim/pvc unchanged
    deployment.apps/nfs-pv-pvc created
    
    


    3.7、 View the PVC

    [root@kubernetes-master-01 ~]# kubectl apply -f pvc.yaml 
    persistentvolumeclaim/pvc created
    deployment.apps/nfs-pv-pvc created
    [root@kubernetes-master-01 ~]# kubectl get pv,pvc
    NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM         STORAGECLASS   REASON   AGE
    persistentvolume/pv001   2Gi        RWO,RWX        Retain           Available                                         48s
    persistentvolume/pv002   5Gi        RWO,RWX        Delete           Available                                         48s
    persistentvolume/pv003   10Gi       RWO,RWX        Recycle          Bound       default/pvc                           48s
    persistentvolume/pv004   20Gi       RWO,RWX        Retain           Available                                         47s
    
    NAME                        STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    persistentvolumeclaim/pvc   Bound    pv003    10Gi       RWO,RWX                       26s
    
    # view the details
    [root@kubernetes-master-01 ~]# kubectl describe pv pv003
    Name:            pv003
    Labels:          app=pv003
    Annotations:     pv.kubernetes.io/bound-by-controller: yes
    Finalizers:      [kubernetes.io/pv-protection]
    StorageClass:    
    Status:          Bound
    Claim:           default/pvc
    Reclaim Policy:  Recycle
    Access Modes:    RWO,RWX
    VolumeMode:      Filesystem
    Capacity:        10Gi
    Node Affinity:   <none>
    Message:         
    Source:
        Type:      NFS (an NFS mount that lasts the lifetime of a pod)
        Server:    172.16.0.50
        Path:      /nfs/v4
        ReadOnly:  false
    Events:        <none>
    
    # view the Pods and their placement
    [root@kubernetes-master-01 ~]# kubectl get pods -o wide
    NAME                          READY   STATUS              RESTARTS   AGE     IP             NODE                   NOMINATED NODE   READINESS GATES
    empty-dir-796cf6fc58-h2rpf    2/2     Running             0          172m    10.241.24.3    kubernetes-node-02     <none>           <none>
    hostpath-79f9457bc6-vcb6f     1/1     Running             0          146m    10.241.136.3   kubernetes-master-01   <none>           <none>
    nfs-pv-pvc-67f9f546f7-897d4   1/1     Running             0          66s     10.241.168.3   kubernetes-node-01     <none>           <none>
    nfs-pv-pvc-67f9f546f7-8ngw6   1/1     Running             0          66s     10.241.136.4   kubernetes-master-01   <none>           <none>
    nginx-569cc66c-7mnwc          1/1     Running             0          8h      10.241.24.2    kubernetes-node-02     <none>           <none>
    nginx-hpa-fb47f9ccb-zj2hg     1/1     Running             0          7h40m   10.240.56.2    kubernetes-master-03   <none>           <none>
    nginx-pod                     1/1     Running             7          14d     10.242.168.2   kubernetes-master02    <none>           <none>
    recycler-for-pv003            0/1     ContainerCreating   0          2m47s   <none>         kubernetes-master-03   <none>           <none>
    test                          1/1     Running             0          6h49m   10.241.136.2   kubernetes-master-01   <none>           <none>
    
    # access the Pods
    [root@kubernetes-master-01 ~]# curl 10.241.168.3
    <html>
    <head><title>403 Forbidden</title></head>
    <body>
    <center><h1>403 Forbidden</h1></center>
    <hr><center>nginx/1.19.6</center>
    </body>
    </html>
    
    [root@kubernetes-master-01 v4]# echo "pv-pvc" > index.html
    [root@kubernetes-master-01 v4]# curl 10.241.168.3
    pv-pvc
    
    


    4、 StorageClass

    In a large Kubernetes cluster there may be thousands of PVCs, which would mean operators have to create that many PVs in advance. Moreover, as projects evolve, new PVCs keep being submitted, so operators would have to keep adding new PVs that satisfy them, or new Pods would fail to be created because their PVCs cannot bind to a PV. On top of that, the storage obtained through a PVC may well not satisfy an application's various storage needs, and different applications have different performance requirements, such as read/write speed or concurrency. To solve this, Kubernetes introduces another resource object: StorageClass. With StorageClass definitions, administrators can describe storage as classes of resources, for example fast storage versus slow storage; from the StorageClass description it is immediately clear what characteristics each class of storage has, so storage that fits the application can be requested.

    4.1、 Define a StorageClass

    Every StorageClass contains three parameter fields: provisioner, parameters, and reclaimPolicy. They are used when a PersistentVolume belonging to that class needs to be provisioned dynamically.
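
    The chart installed below creates its StorageClass automatically, but for reference this is roughly what a hand-written one looks like; the provisioner string here is an assumption and must match the name the deployed provisioner registers itself under:

    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: nfs-client
    # assumption: whatever name the nfs-client-provisioner is configured with
    provisioner: cluster.local/nfs-client-provisioner
    parameters:
      archiveOnDelete: "true"   # keep released data as an archived-* directory
    reclaimPolicy: Delete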

    # dynamic provisioning of PVs and PVCs
    # download helm
    https://github.com/helm/helm/releases/tag/v3.5.3
    wget https://get.helm.sh/helm-v3.3.4-linux-amd64.tar.gz
    
    # unpack
    tar xf helm-v3.3.4-linux-amd64.tar.gz
    # install
    mv linux-amd64/helm /usr/local/bin/
    # verify
    helm version

    # add the Aliyun chart repo
    helm repo add ali-stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts

    # add the official repo
    helm repo add stable https://kubernetes-charts.storage.googleapis.com/


    # add the Azure China mirror of the helm charts repo
    helm repo add azure http://mirror.azure.cn/kubernetes/charts
        
    [root@kubernetes-master-01 linux-amd64]# helm repo list
    NAME      	URL                                                   
    ali-stable	https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
    azure     	http://mirror.azure.cn/kubernetes/charts 
    


    Install the NFS client provisioner

    # pull the nfs-client chart
    helm pull azure/nfs-client-provisioner
    tar xf nfs-client-provisioner-1.2.11.tgz
    cd nfs-client-provisioner/
    # adjust the configuration (NFS server address, export path, storageClass name)
    vim values.yaml
    # install the nfs client provisioner
    [root@kubernetes-master-01 nfs-client-provisioner]# pwd
    /root/nfs-client-provisioner
    [root@kubernetes-master-01 nfs-client-provisioner]# helm install hfs-client ./
    WARNING: This chart is deprecated
    NAME: hfs-client
    LAST DEPLOYED: Sun Mar 21 21:49:13 2021
    NAMESPACE: default
    STATUS: deployed
    REVISION: 1
    TEST SUITE: None
    # list the installed releases
    [root@kubernetes-master-01 nfs-client-provisioner]# helm list
    NAME      	NAMESPACE	REVISION	UPDATED                               	STATUS  	CHART                        	APP VERSION
    hfs-client	default  	1       	2021-03-21 21:49:13.26492902 +0800 CST	deployed	nfs-client-provisioner-1.2.11	3.1.0  
    


    (screenshot: a failed run)

    Automatically create the PV and PVC
    # sc-nfs.yaml: the PV is provisioned dynamically through the StorageClass
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      namespace: default
      name: test-nfs
      labels:
        app: test-nfs
    spec:
      accessModes:
        - ReadWriteMany
      storageClassName: nfs-client # must match the StorageClass created by the nfs-client chart
      resources:
        requests:
          storage: 8Gi
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: test-nfs-storageclass
      namespace: default
      labels:
        app: test-nfs
    spec:
      selector:
        matchLabels:
          app: test-nfs
    
      template:
        metadata:
          labels:
            app: test-nfs
        spec:
          containers:
            - name: nginx
              image: nginx
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - mountPath: /usr/share/nginx/html
                  name: test-nfs
          volumes:
            - name: test-nfs
              persistentVolumeClaim:
                claimName: test-nfs
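
    After applying this file, the PVC should go Bound against a PV that the provisioner creates on the fly; a quick check (the generated PV name will vary):

    kubectl apply -f sc-nfs.yaml
    kubectl get pvc test-nfs
    kubectl get pv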
    


    六、 K8S Configuration Center

    In production we often need to modify configuration files; done the traditional way this not only disturbs the running service, the steps are also tedious. To solve this, Kubernetes introduced ConfigMap in version 1.2 to separate an application's configuration from its code. This not only makes applications reusable, it also enables more flexible behaviour through different configurations. When creating containers, users can package the application as an image and inject configuration through environment variables or externally mounted files. ConfigMap && Secret are Kubernetes' application-facing configuration center: they solve the configuration-mounting problem and support encryption, hot reloading, and more, making them an extremely useful feature.
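
    The subsections below cover creating ConfigMaps; for the consumption side mentioned above, here is a hedged sketch of both injection styles, assuming a ConfigMap demo-config with a key log_level exists (one is created in the literal-key example just below; all names are illustrative):

    kind: Pod
    apiVersion: v1
    metadata:
      name: cm-demo
    spec:
      containers:
        - name: app
          image: busybox:1.28.3
          command: ['/bin/sh', '-c', 'echo "$APP_MODE"; ls /etc/config; sleep 3600']
          env:
            - name: APP_MODE              # one key injected as an environment variable
              valueFrom:
                configMapKeyRef:
                  name: demo-config       # an existing ConfigMap
                  key: log_level          # an existing key inside it
          volumeMounts:
            - mountPath: /etc/config      # every key appears as a file in this directory
              name: config
      volumes:
        - name: config
          configMap:
            name: demo-config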

    1、 Create a ConfigMap

    The configuration is written directly into etcd and read back from there.
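
    Besides files (1.1) and directories (1.2), literal key-value pairs work too; this hedged example creates the demo-config used in the Pod sketch above:

    kubectl create configmap demo-config --from-literal=log_level=debug
    kubectl get configmap demo-config -o yaml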

    1.1、 From a configuration file

    kubectl create configmap <configmap-name> --from-file=<file>.yaml
    
    # create a configmap from a file
    [root@kubernetes-master-01 ~]# kubectl create configmap test-configmap --from-file=sc-nfs.yaml 
    configmap/test-configmap created
    
    # list configmaps
    [root@kubernetes-master-01 ~]# kubectl get configmaps 
    NAME             DATA   AGE
    test-configmap   1      12s
    
    # inspect the configmap
    [root@kubernetes-master-01 ~]# kubectl describe configmaps test-configmap 
    Name:         test-configmap
    Namespace:    default
    Labels:       <none>
    Annotations:  <none>
    
    Data
    ====
    sc-nfs.yaml:
    ----
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      namespace: default
      name: test-nfs
      labels:
        app: test-nfs
    spec:
      accessModes:
        - ReadWriteMany
      storageClassName: nfs-client # must match the StorageClass created by the nfs-client chart
      resources:
        requests:
          storage: 8Gi
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: test-nfs-storageclass
      namespace: default
      labels:
        app: test-nfs
    spec:
      selector:
        matchLabels:
          app: test-nfs
    
      template:
        metadata:
          labels:
            app: test-nfs
        spec:
          containers:
            - name: nginx
              image: nginx
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - mountPath: /usr/share/nginx/html
                  name: test-nfs
          volumes:
            - name: test-nfs
              persistentVolumeClaim:
                claimName: test-nfs
    
    
    Events:  <none>
    
    


    1.2、 Creating from a configuration directory

    # Create from a whole directory
    [root@kubernetes-master-01 ~]# cd nfs-client-provisioner/
    [root@kubernetes-master-01 nfs-client-provisioner]# ll
    total 20
    -rw-r--r--. 1 root root  456 Nov 14 04:30 Chart.yaml
    drwxr-xr-x. 2 root root   30 Mar 21 20:02 ci
    -rw-r--r--. 1 root root 6248 Nov 14 04:30 README.md
    drwxr-xr-x. 2 root root 4096 Mar 21 20:02 templates
    -rw-r--r--. 1 root root 1878 Mar 21 21:41 values.yaml
    [root@kubernetes-master-01 nfs-client-provisioner]# cat Chart.yaml 
    apiVersion: v1
    appVersion: 3.1.0
    deprecated: true
    description: DEPRECATED - nfs-client is an automatic provisioner that used your *already
      configured* NFS server, automatically creating Persistent Volumes.
    home: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
    keywords:
    - nfs
    - storage
    name: nfs-client-provisioner
    sources:
    - https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
    version: 1.2.11
    [root@kubernetes-master-01 nfs-client-provisioner]# 
    [root@kubernetes-master-01 nfs-client-provisioner]# 
    [root@kubernetes-master-01 nfs-client-provisioner]# cd ..
    [root@kubernetes-master-01 ~]# kubectl create configmap nfs --from-file=nfs-client-provisioner
    configmap/nfs created
    [root@kubernetes-master-01 ~]# kubectl get configmaps 
    NAME             DATA   AGE
    nfs              4      14s
    test-configmap   1      10m
    [root@kubernetes-master-01 ~]# kubectl describe configmaps nfs 
    Name:         nfs
    Namespace:    default
    Labels:       <none>
    Annotations:  <none>
    
    Data
    ====
    .helmignore:
    ----
    # Patterns to ignore when building packages.
    # This supports shell glob matching, relative path matching, and
    # negation (prefixed with !). Only one pattern per line.
    .DS_Store
    # Common VCS dirs
    .git/
    .gitignore
    .bzr/
    .bzrignore
    .hg/
    .hgignore
    .svn/
    # Common backup files
    *.swp
    *.bak
    *.tmp
    *~
    # Various IDEs
    .project
    .idea/
    *.tmproj
    
    Chart.yaml:
    ----
    apiVersion: v1
    appVersion: 3.1.0
    deprecated: true
    description: DEPRECATED - nfs-client is an automatic provisioner that used your *already
      configured* NFS server, automatically creating Persistent Volumes.
    home: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
    keywords:
    - nfs
    - storage
    name: nfs-client-provisioner
    sources:
    - https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
    version: 1.2.11
    
    README.md:
    ----
    # ⚠️ Repo Archive Notice
    
    As of Nov 13, 2020, charts in this repo will no longer be updated.
    For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/).
    
    # nfs-client-provisioner
    
    The [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) is an automatic provisioner for Kubernetes that uses your *already configured* NFS server, automatically creating Persistent Volumes.
    
    ## DEPRECATION NOTICE
    
    This chart is deprecated and no longer supported.
    
    ## TL;DR;
    
    ```console
    $ helm install --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
    ```
    
    For **arm** deployments set `image.repository` to `--set image.repository=quay.io/external_storage/nfs-client-provisioner-arm`
    
    ## Introduction
    
    This charts installs custom [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) into a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It also installs a [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) into the cluster which dynamically creates persistent volumes from single NFS share.
    
    ## Prerequisites
    
    - Kubernetes 1.9+
    - Existing NFS Share
    
    ## Installing the Chart
    
    To install the chart with the release name `my-release`:
    
    ```console
    $ helm install --name my-release --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
    ```
    
    The command deploys the given storage class in the default configuration. It can be used afterswards to provision persistent volumes. The [configuration](#configuration) section lists the parameters that can be configured during installation.
    
    > **Tip**: List all releases using `helm list`
    
    ## Uninstalling the Chart
    
    To uninstall/delete the `my-release` deployment:
    
    ```console
    $ helm delete my-release
    ```
    
    The command removes all the Kubernetes components associated with the chart and deletes the release.
    
    ## Configuration
    
    The following tables lists the configurable parameters of this chart and their default values.
    
    | Parameter                           | Description                                                 | Default                                           |
    | ----------------------------------- | ----------------------------------------------------------- | ------------------------------------------------- |
    | `replicaCount`                      | Number of provisioner instances to deployed                 | `1`                                               |
    | `strategyType`                      | Specifies the strategy used to replace old Pods by new ones | `Recreate`                                        |
    | `image.repository`                  | Provisioner image                                           | `quay.io/external_storage/nfs-client-provisioner` |
    | `image.tag`                         | Version of provisioner image                                | `v3.1.0-k8s1.11`                                  |
    | `image.pullPolicy`                  | Image pull policy                                           | `IfNotPresent`                                    |
    | `storageClass.name`                 | Name of the storageClass                                    | `nfs-client`                                      |
    | `storageClass.defaultClass`         | Set as the default StorageClass                             | `false`                                           |
    | `storageClass.allowVolumeExpansion` | Allow expanding the volume                                  | `true`                                            |
    | `storageClass.reclaimPolicy`        | Method used to reclaim an obsoleted volume                  | `Delete`                                          |
    | `storageClass.provisionerName`      | Name of the provisionerName                                 | null                                              |
    | `storageClass.archiveOnDelete`      | Archive pvc when deleting                                   | `true`                                            |
    | `storageClass.accessModes`          | Set access mode for PV                                      | `ReadWriteOnce`                                   |
    | `nfs.server`                        | Hostname of the NFS server                                  | null (ip or hostname)                             |
    | `nfs.path`                          | Basepath of the mount point to be used                      | `/ifs/kubernetes`                                 |
    | `nfs.mountOptions`                  | Mount options (e.g. 'nfsvers=3')                            | null                                              |
    | `resources`                         | Resources required (e.g. CPU, memory)                       | `{}`                                              |
    | `rbac.create`                       | Use Role-based Access Control                               | `true`                                            |
    | `podSecurityPolicy.enabled`         | Create & use Pod Security Policy resources                  | `false`                                           |
    | `priorityClassName`                 | Set pod priorityClassName                                   | null                                              |
    | `serviceAccount.create`             | Should we create a ServiceAccount                           | `true`                                            |
    | `serviceAccount.name`               | Name of the ServiceAccount to use                           | null                                              |
    | `nodeSelector`                      | Node labels for pod assignment                              | `{}`                                              |
    | `affinity`                          | Affinity settings                                           | `{}`                                              |
    | `tolerations`                       | List of node taints to tolerate                             | `[]`                                              |
    
    values.yaml:
    ----
    # Default values for nfs-client-provisioner.
    # This is a YAML-formatted file.
    # Declare variables to be passed into your templates.
    
    replicaCount: 1
    strategyType: Recreate
    
    image:
      repository: quay.io/external_storage/nfs-client-provisioner
      tag: v3.1.0-k8s1.11
      pullPolicy: IfNotPresent
    
    nfs:
      server: 172.16.0.50 # change to your NFS server address
      path: /nfs/v5  # change to the export path you created
      mountOptions:
    
    # For creating the StorageClass automatically:
    storageClass:
      create: true
    
      # Set a provisioner name. If unset, a name will be generated.
      # provisionerName:
    
      # Set StorageClass as the default StorageClass
      # Ignored if storageClass.create is false
      defaultClass: false
    
      # Set a StorageClass name
      # Ignored if storageClass.create is false
      name: nfs-client
    
      # Allow volume to be expanded dynamically
      allowVolumeExpansion: true
    
      # Method used to reclaim an obsoleted volume
      reclaimPolicy: Delete # reclaim policy
    
      # When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
      archiveOnDelete: true
    
      # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
      accessModes: ReadWriteMany  # changed to multi-node read-write
    
    ## For RBAC support:
    rbac:
      # Specifies whether RBAC resources should be created
      create: true
    
    # If true, create & use Pod Security Policy resources
    # https://kubernetes.io/docs/concepts/policy/pod-security-policy/
    podSecurityPolicy:
      enabled: false
    
    ## Set pod priorityClassName
    # priorityClassName: ""
    
    serviceAccount:
      # Specifies whether a ServiceAccount should be created
      create: true
    
      # The name of the ServiceAccount to use.
      # If not set and create is true, a name is generated using the fullname template
      name:
    
    resources: {}
      # limits:
      #  cpu: 100m
      #  memory: 128Mi
      # requests:
      #  cpu: 100m
      #  memory: 128Mi
    
    nodeSelector: {}
    
    tolerations: []
    
    affinity: {}
    
    Events:  <none>
    
    


    1.3、 Creating from literal values

    [root@kubernetes-master-01 ~]# kubectl create configmap calue --from-literal=env=prod --from-literal=language=Python
    configmap/calue created
    [root@kubernetes-master-01 ~]# kubectl get configmaps 
    NAME             DATA   AGE
    calue            2      7s
    nfs              4      11m
    test-configmap   1      21m
    [root@kubernetes-master-01 ~]# kubectl describe configmaps calue 
    Name:         calue
    Namespace:    default
    Labels:       <none>
    Annotations:  <none>
    
    Data
    ====
    env:
    ----
    prod
    language:
    ----
    Python
    Events:  <none>
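
    Literal entries like these are usually injected as environment variables rather than mounted as files. A minimal sketch (the pod name is illustrative), assuming the calue ConfigMap above exists:

    kind: Pod
    apiVersion: v1
    metadata:
      name: calue-env-demo # hypothetical name
    spec:
      containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          env:
            - name: APP_ENV # receives the value "prod"
              valueFrom:
                configMapKeyRef:
                  name: calue
                  key: env
          envFrom: # or import every key under its own name
            - configMapRef:
                name: calue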
    
    


    1.4、 Creating a ConfigMap from a manifest

    vim configmap.yaml
    
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: configmap
    data:
      # nginx: 123 ; use a pipe (| or |-) for multi-line values
      nginx: |
        upstream tomcatserver1 {
            server 192.168.72.49:8081;
        }
        upstream tomcatserver2 {
            server 192.168.72.49:8082;
        }
        server {
            listen 80;
            server_name 8081.max.com;
        location / {
            proxy_pass http://tomcatserver1;
            index index.html index.htm;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
          }
        }
        
    
    # Apply it
    [root@kubernetes-master-01 ~]# vi configmap.yaml
    [root@kubernetes-master-01 ~]# kubectl apply -f co
    components-metrics-server.yaml  configmap.yaml                  
    [root@kubernetes-master-01 ~]# kubectl apply -f configmap.yaml 
    configmap/configmap created
    [root@kubernetes-master-01 ~]# kubectl get con
    configmaps                controllerrevisions.apps  
    [root@kubernetes-master-01 ~]# kubectl get configmaps 
    NAME             DATA   AGE
    calue            2      15m
    configmap        1      37s
    nfs              4      26m
    test-configmap   1      36m
    [root@kubernetes-master-01 ~]# kubectl describe configmaps configmap 
    Name:         configmap
    Namespace:    default
    Labels:       <none>
    Annotations:  
    Data
    ====
    nginx:
    ----
    upstream tomcatserver1 {
        server 192.168.72.49:8081;
    }
    upstream tomcatserver2 {
        server 192.168.72.49:8082;
    }
    server {
        listen 80;
        server_name 8081.max.com;
    location / {
        proxy_pass http://tomcatserver1;
        index index.html index.htm;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
      }
    }
    
    Events:  <none>
    
    


    2、 Using a ConfigMap

    In most cases a ConfigMap is consumed by mounting it as a volume.

    # Consume the configmap as a volume
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: configmap-deploymemt
    spec:
      selector:
        matchLabels:
          app: configmap-deploymemt
      template:
        metadata:
          labels:
            app: configmap-deploymemt
        spec:
          containers:
            - name: nginx
              imagePullPolicy: IfNotPresent
              image: nginx:latest
              volumeMounts: # mount the configmap volume
                - mountPath: /usr/share/nginx/html
                  name: nginx
          volumes:
            - name: nginx
              configMap:
                name: configmap # must reference an existing configmap
                items:
                  - key: nginx
                    path: nginx
    
                        
    # List the configmaps
    [root@kubernetes-master-01 ~]# kubectl get configmaps 
    NAME             DATA   AGE
    calue            2      28m
    configmap        1      14m
    nfs              4      39m
    test-configmap   1      49m
    # Create the pods
    [root@kubernetes-master-01 ~]# vi configmap-deployment.yaml
    [root@kubernetes-master-01 ~]# kubectl apply -f configmap-deployment.yaml 
    deployment.apps/configmap-deploymemt created
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=configmap-deploymemt
    NAME                                    READY   STATUS    RESTARTS   AGE
    configmap-deploymemt-54b79567c7-9xpk7   1/1     Running   0          21s
    # Check the mount
    [root@kubernetes-master-01 ~]# kubectl exec -it configmap-deploymemt-54b79567c7-9xpk7 -- bash
    root@configmap-deploymemt-54b79567c7-9xpk7:/# cd /usr/share/nginx/html/
        
        
    # the original contents of the nginx html directory were replaced by the mount
    root@configmap-deploymemt-54b79567c7-9xpk7:/usr/share/nginx/html# ls
    nginx
    root@configmap-deploymemt-54b79567c7-9xpk7:/usr/share/nginx/html# cat nginx 
    upstream tomcatserver1 {
        server 192.168.72.49:8081;
    }
    upstream tomcatserver2 {
        server 192.168.72.49:8082;
    }
    server {
        listen 80;
        server_name 8081.max.com;
    location / {
        proxy_pass http://tomcatserver1;
        index index.html index.htm;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
      }
    }
    root@configmap-deploymemt-54b79567c7-9xpk7:/usr/share/nginx/html# ls -alh
    total 0
    drwxrwxrwx. 3 root root 72 Apr  7 15:10 .
    drwxr-xr-x. 3 root root 18 Jan 12 10:17 ..
    drwxr-xr-x. 2 root root 19 Apr  7 15:10 ..2021_04_07_15_10_12.993253073
    lrwxrwxrwx. 1 root root 31 Apr  7 15:10 ..data -> ..2021_04_07_15_10_12.993253073
    lrwxrwxrwx. 1 root root 12 Apr  7 15:10 nginx -> ..data/nginx
    root@configmap-deploymemt-54b79567c7-9xpk7:/usr/share/nginx/html# 
    


    2.1、 subPath

    Using mountPath together with subPath mounts a single key as one file, which also prevents several ConfigMaps mounted into the same directory from overwriting each other. Note that subPath mounts do not receive hot updates.

    # Mount a single key as a file via subPath
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: subpath-configmap-deploymemt
    spec:
      selector:
        matchLabels:
          app: subpath-configmap-deploymemt
      template:
        metadata:
          labels:
            app: subpath-configmap-deploymemt
        spec:
          containers:
            - name: nginx
              imagePullPolicy: IfNotPresent
              image: nginx:latest
              volumeMounts: # mount the configmap volume
                - mountPath: /usr/share/nginx/html/index.html
                  name: nginx
                  subPath: nginx # prevents overwriting the whole target directory; must match the item key below (- key: nginx)
    
          volumes:
            - name: nginx
              configMap:
                name: configmap # must reference an existing configmap
                items:
                  - key: nginx
                    path: nginx
    
                        
                        
                        
    [root@kubernetes-master-01 ~]# vi subpath-configmap-deployment.yaml 
    [root@kubernetes-master-01 ~]# kubectl apply -f subpath-configmap-deployment.yaml 
    deployment.apps/subpath-configmap-deploymemt created
    [root@kubernetes-master-01 ~]# kubectl get pods -l app=subpath-configmap-deploymemt
    NAME                                            READY   STATUS    RESTARTS   AGE
    subpath-configmap-deploymemt-5fd76f975c-l5jlx   1/1     Running   0          17s
    [root@kubernetes-master-01 ~]# kubectl exec -it subpath-configmap-deploymemt-5fd76f975c-l5jlx -- bash
    root@subpath-configmap-deploymemt-5fd76f975c-l5jlx:/# cd /usr/share/nginx/html/
    root@subpath-configmap-deploymemt-5fd76f975c-l5jlx:/usr/share/nginx/html# ll
    bash: ll: command not found
    root@subpath-configmap-deploymemt-5fd76f975c-l5jlx:/usr/share/nginx/html# ls
    50x.html  index.html
    root@subpath-configmap-deploymemt-5fd76f975c-l5jlx:/usr/share/nginx/html# cat index.html 
    upstream tomcatserver1 {
        server 192.168.72.49:8081;
    }
    upstream tomcatserver2 {
        server 192.168.72.49:8082;
    }
    server {
        listen 80;
        server_name 8081.max.com;
    location / {
        proxy_pass http://tomcatserver1;
        index index.html index.htm;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
      }
    }
    root@subpath-configmap-deploymemt-5fd76f975c-l5jlx:/usr/share/nginx/html# exit
    exit
    [root@kubernetes-master-01 ~]# kubectl get pods -o wide
    NAME                                                 READY   STATUS    RESTARTS   AGE     IP             NODE                   NOMINATED NODE   READINESS GATES
    empty-dir-796cf6fc58-h2rpf                           2/2     Running   0          17d     10.241.24.3    kubernetes-node-02     <none>           <none>
    hfs-client-nfs-client-provisioner-5dd68bbddf-dn2lq   1/1     Running   3          17d     10.241.144.3   kubernetes-master-03   <none>           <none>
    hostpath-79f9457bc6-vcb6f                            1/1     Running   3          17d     10.240.48.3    kubernetes-master-01   <none>           <none>
    nfs-pv-pvc-67f9f546f7-897d4                          1/1     Running   3          17d     10.240.168.2   kubernetes-node-01     <none>           <none>
    nfs-pv-pvc-67f9f546f7-8ngw6                          1/1     Running   2          17d     10.240.48.2    kubernetes-master-01   <none>           <none>
    nginx-569cc66c-7mnwc                                 1/1     Running   0          17d     10.241.24.2    kubernetes-node-02     <none>           <none>
    nginx-hpa-fb47f9ccb-zj2hg                            1/1     Running   3          17d     10.241.144.4   kubernetes-master-03   <none>           <none>
    nginx-pod                                            1/1     Running   10         31d     10.242.128.3   kubernetes-master02    <none>           <none>
    subpath-configmap-deploymemt-5fd76f975c-l5jlx        1/1     Running   0          2m36s   10.240.168.4   kubernetes-node-01     <none>           <none>
    test                                                 1/1     Running   2          17d     10.240.48.4    kubernetes-master-01   <none>           <none>
    test-nfs-storageclass-748b6f9c48-5q2qb               1/1     Running   3          17d     10.241.144.5   kubernetes-master-03   <none>           <none>
    [root@kubernetes-master-01 ~]# curl 10.240.168.4
    upstream tomcatserver1 {
        server 192.168.72.49:8081;
    }
    upstream tomcatserver2 {
        server 192.168.72.49:8082;
    }
    server {
        listen 80;
        server_name 8081.max.com;
    location / {
        proxy_pass http://tomcatserver1;
        index index.html index.htm;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
      }
    }
    
    


    2.2、 ConfigMap hot updates

    # Hot-update demo
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: my-nginx
    spec:
      selector:
        matchLabels:
          run: my-nginx
      template:
        metadata:
          labels:
            run: my-nginx
        spec:
          containers:
            - name: my-nginx
              image: nginx
              ports:
                - containerPort: 80
              envFrom:
                - configMapRef:
                    name: env-config
              volumeMounts:
                - mountPath: /usr/share/nginx/demo/
                  name: config
          volumes:
            - name: config
              configMap:
                name: env-config
                items:
                  - key: log_level
                    path: log_level
      replicas: 1
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: env-config
      namespace: default
    data:
      log_level: INFO
    
    
    

    Create the hot-update resources

    [root@kubernetes-master-01 ~]# kubectl apply -f configmap-hot-update.yaml 
    deployment.apps/my-nginx created
    configmap/env-config created
    [root@kubernetes-master-01 ~]# kubectl get pods
    NAME                                                 READY   STATUS              RESTARTS   AGE
    empty-dir-796cf6fc58-h2rpf                           2/2     Running             0          29d
    hfs-client-nfs-client-provisioner-5dd68bbddf-dn2lq   1/1     Running             5          29d
    hostpath-79f9457bc6-vcb6f                            1/1     Running             4          29d
    my-nginx-74f7b9d7fb-qr2sq                            0/1     ContainerCreating   0          34s
    nfs-pv-pvc-67f9f546f7-897d4                          1/1     Running             5          29d
    nfs-pv-pvc-67f9f546f7-8ngw6                          1/1     Running             4          29d
    nginx-569cc66c-7mnwc                                 1/1     Running             0          29d
    nginx-hpa-fb47f9ccb-zj2hg                            1/1     Running             4          29d
    nginx-pod                                            1/1     Running             11         43d
    subpath-configmap-deploymemt-5fd76f975c-l5jlx        1/1     Running             1          11d
    test                                                 1/1     Running             3          29d
    test-nfs-storageclass-748b6f9c48-5q2qb               1/1     Running             4          29d
    [root@kubernetes-master-01 ~]# kubectl get pods
    NAME                                                 READY   STATUS    RESTARTS   AGE
    empty-dir-796cf6fc58-h2rpf                           2/2     Running   0          29d
    hfs-client-nfs-client-provisioner-5dd68bbddf-dn2lq   1/1     Running   5          29d
    hostpath-79f9457bc6-vcb6f                            1/1     Running   4          29d
    my-nginx-74f7b9d7fb-qr2sq                            1/1     Running   0          93s
    nfs-pv-pvc-67f9f546f7-897d4                          1/1     Running   5          29d
    nfs-pv-pvc-67f9f546f7-8ngw6                          1/1     Running   4          29d
    nginx-569cc66c-7mnwc                                 1/1     Running   0          29d
    nginx-hpa-fb47f9ccb-zj2hg                            1/1     Running   4          29d
    nginx-pod                                            1/1     Running   11         43d
    subpath-configmap-deploymemt-5fd76f975c-l5jlx        1/1     Running   1          11d
    test                                                 1/1     Running   3          29d
    test-nfs-storageclass-748b6f9c48-5q2qb               1/1     Running   4          29d
    [root@kubernetes-master-01 ~]# kubectl exec -it my-nginx-74f7b9d7fb-qr2sq -- bash
    root@my-nginx-74f7b9d7fb-qr2sq:/# cd /usr/share/nginx/demo/
    root@my-nginx-74f7b9d7fb-qr2sq:/usr/share/nginx/demo# ls
    log_level
    root@my-nginx-74f7b9d7fb-qr2sq:/usr/share/nginx/demo# cat log_level 
    INFOroot@my-nginx-74f7b9d7fb-qr2sq:/usr/share/nginx/demo# cat log_level 
    INFOroot@my-nginx-74f7b9d7fb-qr2sq:/usr/share/nginx/demo#
    

    image-20210419222823437

    Perform the hot update

    [root@kubernetes-master-01 ~]# kubectl get configmaps
    NAME             DATA   AGE
    calue            2      11d
    configmap        1      11d
    env-config       1      6m43s
    nfs              4      12d
    test-configmap   1      12d
    [root@kubernetes-master-01 ~]# kubectl edit configmaps env-config
    
    
    # Please edit the object below. Lines beginning with a '#' will be ignored,
    # and an empty file will abort the edit. If an error occurs while saving this file will be
    # reopened with the relevant failures.
    #
    apiVersion: v1
    data:
      log_level: INFO
    kind: ConfigMap
    metadata:
      annotations:
        kubectl.kubernetes.io/last-applied-configuration: |
          {"apiVersion":"v1","data":{"log_level":"INFO"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"env-config","namespace":"default"}}
      creationTimestamp: "2021-04-19T14:22:34Z"
      managedFields:
      - apiVersion: v1
        fieldsType: FieldsV1
        fieldsV1:
          f:data:
            .: {}
            f:log_level: {}
          f:metadata:
            f:annotations:
              .: {}
              f:kubectl.kubernetes.io/last-applied-configuration: {}
        manager: kubectl
        operation: Update
        time: "2021-04-19T14:22:34Z"
      name: env-config
      namespace: default
      resourceVersion: "610639"
      selfLink: /api/v1/namespaces/default/configmaps/env-config
      uid: 8f1dd1b1-fed2-419e-bb8b-a81c19ca0bce
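
    Change data.log_level from INFO to DEBUG in the editor and save. The key mounted through the volume is refreshed by the kubelet on its next sync (typically within a minute), while values injected via envFrom keep their old value until the pod is recreated; subPath mounts, as noted in 2.1, never update. A check after the edit propagates, assuming log_level was set to DEBUG:

    # Re-read the mounted key after the edit
    kubectl exec -it my-nginx-74f7b9d7fb-qr2sq -- cat /usr/share/nginx/demo/log_level
    # DEBUG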
    


    3、 Secret

    Secret solves the problem of configuring sensitive data such as passwords, tokens, and keys without exposing them in an image or in the Pod spec. A Secret can be consumed as a volume or through environment variables.

    There are three types of Secret:

    • Service Account: used to access the Kubernetes API; created automatically by Kubernetes and mounted automatically into Pods at /run/secrets/kubernetes.io/serviceaccount
    • Opaque: a base64-encoded Secret used to store passwords, keys, and similar data
    • kubernetes.io/dockerconfigjson: used to store authentication credentials for a private docker registry
    
    

    3.1、 Opaque Secret

    Opaque data is a map type whose values must be base64-encoded.

    
    

    3.1.1、Writing the Secret manifest
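
    A minimal sketch of an Opaque Secret with illustrative values (produce them with echo -n 'admin' | base64); the name test-secret is hypothetical:

    kind: Secret
    apiVersion: v1
    metadata:
      name: test-secret # hypothetical name
      namespace: default
    type: Opaque
    data:
      username: YWRtaW4=         # base64 of "admin"
      password: MWYyZDFlMmU2N2Rm # base64 of "1f2d1e2e67df"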

    
    

    3.1.2、Creating the Secret resource
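
    A plausible sequence, assuming the sketch above was saved as secret.yaml:

    kubectl apply -f secret.yaml
    # secret/test-secret created
    kubectl get secrets test-secret
    # NAME          TYPE     DATA   AGE
    # test-secret   Opaque   2      5s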

    
    

    3.1.3、Writing a test application manifest
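
    A sketch of a test pod that consumes the hypothetical test-secret both as environment variables (decoded automatically) and as a mounted volume (one file per key):

    kind: Pod
    apiVersion: v1
    metadata:
      name: secret-test-pod # hypothetical name
    spec:
      containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          env:
            - name: SECRET_USERNAME
              valueFrom:
                secretKeyRef:
                  name: test-secret
                  key: username
          volumeMounts:
            - mountPath: /etc/secret
              name: secret-volume
      volumes:
        - name: secret-volume
          secret:
            secretName: test-secret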

    
    

    3.1.4、Creating the test resources
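
    Applying and probing the pod sketched above should show the decoded plaintext in both places:

    kubectl apply -f secret-test-pod.yaml
    kubectl exec -it secret-test-pod -- sh -c 'echo $SECRET_USERNAME; cat /etc/secret/password'
    # admin
    # 1f2d1e2e67df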

    
    

    3.2、 kubernetes.io/dockerconfigjson

    Used to store authentication credentials for a private docker registry.

    3.2.1、Creating the secret
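
    kubectl has a dedicated generator for this secret type; a sketch with placeholder registry details:

    kubectl create secret docker-registry registry-secret \
      --docker-server=registry.example.com \
      --docker-username=admin \
      --docker-password='<password>' \
      --docker-email=admin@example.com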

    
    

    3.2.2、Testing the secret
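
    The secret is consumed through imagePullSecrets in the pod spec; a minimal sketch pulling a hypothetical private image:

    kind: Pod
    apiVersion: v1
    metadata:
      name: private-image-test # hypothetical name
    spec:
      imagePullSecrets:
        - name: registry-secret
      containers:
        - name: app
          image: registry.example.com/app:latest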

    
    

    3.3、 Service Account

    A Service Account is used to access the Kubernetes API. It is created automatically by Kubernetes and mounted automatically into Pods at /run/secrets/kubernetes.io/serviceaccount.
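
    The auto-mounted token can be inspected from any running pod, for example the nginx-pod used earlier:

    kubectl exec -it nginx-pod -- ls /run/secrets/kubernetes.io/serviceaccount
    # ca.crt  namespace  token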

    
    
  • Original article: https://www.cnblogs.com/randysun/p/15595929.html