• Kubernetes: installing Helm and ingress


    1. Install the Helm client

    [root@cc-k8s01 work]# wget https://storage.googleapis.com/kubernetes-helm/helm-v2.14.0-rc.2-linux-amd64.tar.gz
    [root@cc-k8s01 work]# tar zxf helm-v2.14.0-rc.2-linux-amd64.tar.gz
    [root@cc-k8s01 work]# cd linux-amd64/
    [root@cc-k8s01 linux-amd64]# mv helm /opt/k8s/bin
    [root@cc-k8s01 linux-amd64]# helm
    The Kubernetes package manager
     
    To begin working with Helm, run the 'helm init' command:
     
        $ helm init
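
    Before initializing Tiller, you can confirm the binary is on the PATH and check the client version. A quick check (the -c flag queries only the local client, so it works while the server side is not yet installed):

    [root@cc-k8s01 work]# helm version -c
    Client: &version.Version{SemVer:"v2.14.0-rc.2", GitCommit:"012cb0ac1a1b2f888144ef5a67b8dab6c2d45be6", GitTreeState:"clean"}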
    

    2. Install the Tiller server

    1. Create the RBAC role and grant permissions

    kubectl create serviceaccount tiller --namespace kube-system
    kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
    kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
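    Note that the kubectl patch above only succeeds once the tiller-deploy Deployment exists, i.e. after helm init further down. For reference, the same ServiceAccount and binding can also be created declaratively; a minimal sketch (the file name is illustrative):

    cat > tiller-rbac.yaml <<EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: tiller
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: tiller
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: cluster-admin
    subjects:
    - kind: ServiceAccount
      name: tiller
      namespace: kube-system
    EOF
    kubectl apply -f tiller-rbac.yaml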
    

    2. Create certificates for TLS communication

    ## Create the Tiller server certificate CSR
    cd /opt/k8s/work
    cat > tiller-server-csr.json <<EOF
    {
      "CN": "tiller-server",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
         {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "4Paradigm"
        }
      ]
    }
    EOF
    ## Generate the certificate, reusing the Kubernetes cluster CA
    cd /opt/k8s/work
    cfssl gencert -ca=/opt/k8s/work/ca.pem \
      -ca-key=/opt/k8s/work/ca-key.pem \
      -config=/opt/k8s/work/ca-config.json \
      -profile=kubernetes tiller-server-csr.json | cfssljson -bare tiller-server
    ls tiller-server*
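
    Optionally, inspect the issued certificate with cfssl to confirm the subject and validity period:

    cfssl certinfo -cert tiller-server.pem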
    
    ## Create the CSR for the Helm client certificate used to talk to Tiller
    cd /opt/k8s/work
    cat > helm-csr.json <<EOF
    {
      "CN": "tiller",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
         {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "4Paradigm"
        }
      ]
    }
    EOF
    ## Generate the certificate, reusing the Kubernetes cluster CA
    cd /opt/k8s/work
    cfssl gencert -ca=/opt/k8s/work/ca.pem \
      -ca-key=/opt/k8s/work/ca-key.pem \
      -config=/opt/k8s/work/ca-config.json \
      -profile=kubernetes helm-csr.json | cfssljson -bare helm
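
    A quick sanity check that both certificate pairs chain back to the cluster CA (openssl is assumed to be installed; each file is verified independently):

    cd /opt/k8s/work
    openssl verify -CAfile ca.pem tiller-server.pem helm.pem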
    

    3. Initialize Tiller

    # The Tiller image is hosted on gcr.io, which is unreachable from here, so pull it from a mirror registry first
    [root@cc-k8s02 ~]# docker pull gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2
    v2.14.0-rc.2: Pulling from kubernetes-helm/tiller
    bdf0201b3a05: Pull complete
    879b5272666d: Pull complete
    3dd50e1bb957: Pull complete
    ba6c28ffc2cd: Pull complete
    Digest: sha256:5d19651f555dfcd9aafa52ee569413aa38e60ddab19e54c6dc33ad60da2d46ed
    Status: Downloaded newer image for gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2
    [root@cc-k8s02 ~]# docker tag gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2 gcr.io/kubernetes-helm/tiller:v2.14.0-rc.2
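
    Tiller can be scheduled onto any node, so the pull-and-retag needs to happen on every node in the cluster (or the image can be pushed to a private registry instead). A sketch with illustrative hostnames:

    for node in cc-k8s01 cc-k8s02 cc-k8s03; do
      ssh root@${node} "docker pull gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2 && \
        docker tag gcr.azk8s.cn/kubernetes-helm/tiller:v2.14.0-rc.2 gcr.io/kubernetes-helm/tiller:v2.14.0-rc.2"
    done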

    [root@cc-k8s01 work]# helm init --tiller-tls --tiller-tls-cert ./tiller-server.pem --tiller-tls-key ./tiller-server-key.pem --tiller-tls-verify --tls-ca-cert ca.pem
    $HELM_HOME has been configured at /root/.helm.
    [root@cc-k8s01 work]# kubectl get pods -n kube-system
    NAME                                    READY   STATUS    RESTARTS   AGE
    coredns-5b969f4c88-n8kht                1/1     Running   4          9d
    elasticsearch-logging-0                 1/1     Running   0          9d
    elasticsearch-logging-1                 1/1     Running   0          9d
    fluentd-es-v2.4.0-5n9q2                 1/1     Running   0          9d
    fluentd-es-v2.4.0-8flsx                 1/1     Running   0          9d
    fluentd-es-v2.4.0-qlhb5                 1/1     Running   0          9d
    kibana-logging-f4d99b69f-mw28c          1/1     Running   0          9d
    kube-state-metrics-699fdf75f8-2tskt     1/1     Running   0          2d
    kubernetes-dashboard-7848d45466-mg4w5   1/1     Running   0          9d
    metrics-server-6f97f5879-qg8vx          1/1     Running   0          9d
    tiller-deploy-78fc7f6db4-jq5zs          1/1     Running   0          14s
    
    ## Copy the helm client certificate, private key, and CA certificate into /root/.helm, renaming them to key.pem and cert.pem
    cp ca.pem helm.pem helm-key.pem /root/.helm
    cd /root/.helm
    mv helm-key.pem key.pem
    mv helm.pem cert.pem
    
    ## helm can now talk to Tiller over TLS by adding the --tls flag
    [root@cc-k8s01 work]# helm ls --tls

    The first attempt produced the following error:

    [root@cc-k8s01 work]# helm ls --tls
    E0703 13:32:38.232485   22034 portforward.go:400] an error occurred forwarding 42494 -> 44134: error forwarding port 44134 to pod c656f2021e1cdea7ac0710bf13d9e55d27a7713f4c4c77f80b92379be5d544bf, uid : unable to do port forwarding: socat not found.
    E0703 13:32:39.236986   22034 portforward.go:400] an error occurred forwarding 42494 -> 44134: error forwarding port 44134 to pod c656f2021e1cdea7ac0710bf13d9e55d27a7713f4c4c77f80b92379be5d544bf, uid : unable to do port forwarding: socat not found.
    E0703 13:32:40.688934   22034 portforward.go:400] an error occurred forwarding 42494 -> 44134: error forwarding port 44134 to pod c656f2021e1cdea7ac0710bf13d9e55d27a7713f4c4c77f80b92379be5d544bf, uid : unable to do port forwarding: socat not found.
    E0703 13:33:13.681938   22034 portforward.go:340] error creating error stream for port 42494 -> 44134: Timeout occured
    [root@cc-k8s01 work]# helm version
    Client: &version.Version{SemVer:"v2.14.0-rc.2", GitCommit:"012cb0ac1a1b2f888144ef5a67b8dab6c2d45be6", GitTreeState:"clean"}
    E0711 10:09:50.160064   10916 portforward.go:332] an error occurred forwarding 33491 -> 44134: error forwarding port 44134 to pod tiller-deploy-542252878-15h67_kube-system, uid : unable to do port forwarding: socat not found.
    Error: cannot connect to Tiller

    Fix: install socat on every node:

    yum install -y socat

    After that, helm ls --tls still failed with a timeout, so Tiller was reset and re-initialized:

    [root@cc-k8s01 work]# helm ls --tls
    Error: context deadline exceeded
    [root@cc-k8s01 nginx-ingress]# helm reset
    ## Re-grant the service account, then re-initialize
    kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
    [root@cc-k8s01 work]# helm init --tiller-tls --tiller-tls-cert ./tiller-server.pem --tiller-tls-key ./tiller-server-key.pem --tiller-tls-verify --tls-ca-cert ca.pem
    $HELM_HOME has been configured at /root/.helm.
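
    Typing --tls on every invocation gets tedious; Helm v2 can also pick the TLS settings up from an environment variable. A minimal sketch, assuming the certificates were copied into /root/.helm as above:

    export HELM_TLS_ENABLE=true
    helm ls          # now equivalent to: helm ls --tls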

    3. Install ingress

    1. Download the charts

    Because Helm's default charts repository, https://kubernetes-charts.storage.googleapis.com/, is unreachable, install offline by first cloning the relevant charts from github.com.
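
    Alternatively, if a reachable mirror of the stable repository is available, the chart can be fetched online instead; a sketch (the mirror URL is an assumption, substitute one you trust):

    helm repo add stable http://mirror.azure.cn/kubernetes/charts/
    helm repo update
    helm fetch stable/nginx-ingress --untar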

    [root@cc-k8s01 work]# git clone https://github.com/helm/charts.git
    Cloning into 'charts'...
    remote: Enumerating objects: 11, done.
    remote: Counting objects: 100% (11/11), done.
    remote: Compressing objects: 100% (7/7), done.
    remote: Total 69700 (delta 4), reused 8 (delta 4), pack-reused 69689
    Receiving objects: 100% (69700/69700), 19.88 MiB | 4.27 MiB/s, done.
    Resolving deltas: 100% (49888/49888), done.
     
    [root@cc-k8s01 work]# cd charts/stable/nginx-ingress/
    [root@cc-k8s01 nginx-ingress]# ls -l
    total 48
    -rw-r--r-- 1 root root   569 Jun  4 17:21 Chart.yaml
    drwxr-xr-x 2 root root  4096 Jun  4 17:21 ci
    -rw-r--r-- 1 root root   110 Jun  4 17:21 OWNERS
    -rw-r--r-- 1 root root 17923 Jun  4 17:21 README.md
    drwxr-xr-x 2 root root  4096 Jun  4 17:26 templates
    -rw-r--r-- 1 root root 10045 Jun  4 17:21 values.yaml

    2. Pull the required images

    The nginx-ingress controller image is pulled from quay.io, which is reachable. But the defaultbackend image comes from k8s.gcr.io, which is not. Pull a copy from Docker Hub first and retag it, otherwise the deployment fails with ErrImagePull.

    [root@cc-k8s02 ~]# docker pull googlecontainer/defaultbackend-amd64:1.5
    1.5: Pulling from googlecontainer/defaultbackend-amd64
    65f4220de95d: Pull complete 
    Digest: sha256:4dc5e07c8ca4e23bddb3153737d7b8c556e5fb2f29c4558b7cd6e6df99c512c7
    Status: Downloaded newer image for googlecontainer/defaultbackend-amd64:1.5
    [root@cc-k8s02 ~]# docker tag googlecontainer/defaultbackend-amd64:1.5 k8s.gcr.io/defaultbackend-amd64:1.5
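
    To confirm the retag matches what the chart will actually request, check the defaultBackend image settings in the chart's values.yaml (section name as found in this chart):

    grep -A 6 "defaultBackend:" values.yaml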

    3. Install nginx-ingress with helm

    Commonly used parameters are listed below (a values-file equivalent is sketched after the list):

    • controller.hostNetwork=true: hostNetwork mode, listening on ports 80/443 of each node to serve traffic from outside the cluster. The default is nodePort mode.
    • controller.dnsPolicy: DNS policy. With hostNetwork it must be ClusterFirstWithHostNet, which lets nginx-ingress listen on the host network while still resolving names through the cluster DNS.
    • controller.stats.enabled=true: enables the nginx status page, exposing runtime statistics.
    • controller.metrics.enabled=true: enables Prometheus metrics.
    • rbac.create=true: creates the RBAC roles and bindings nginx-ingress needs at deploy time; required on clusters that enforce RBAC.
    • controller.kind: deploy as a Deployment or a DaemonSet. A Deployment runs a single replica by default; a DaemonSet runs one pod per node.
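
    The same overrides can be kept in a values file instead of a long chain of --set flags; a minimal sketch (the file name is illustrative):

    cat > ingress-values.yaml <<EOF
    controller:
      kind: DaemonSet
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      stats:
        enabled: true
      metrics:
        enabled: true
    rbac:
      create: true
    EOF
    helm install --tls --namespace kube-system --name nginx-ingress -f ingress-values.yaml ./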

    Installation:

    [root@cc-k8s01 nginx-ingress]# helm install --tls --namespace kube-system --name nginx-ingress --set controller.kind=DaemonSet --set rbac.create=true --set controller.stats.enabled=true --set controller.metrics.enabled=true --set controller.hostNetwork=true --set controller.dnsPolicy=ClusterFirstWithHostNet ./
    NAME:   nginx-ingress
    LAST DEPLOYED: Wed Jul  3 14:25:54 2019
    NAMESPACE: kube-system
    STATUS: DEPLOYED
    
    RESOURCES:
    ==> v1/Pod(related)
    NAME                                            READY  STATUS             RESTARTS  AGE
    nginx-ingress-controller-2rlpx                  0/1    ContainerCreating  0         0s
    nginx-ingress-controller-8lpbs                  0/1    ContainerCreating  0         0s
    nginx-ingress-controller-zh4z9                  0/1    ContainerCreating  0         0s
    nginx-ingress-default-backend-7474b6b4cd-d6d9d  0/1    ContainerCreating  0         0s
    
    ==> v1/Service
    NAME                              TYPE          CLUSTER-IP      EXTERNAL-IP  PORT(S)                     AGE
    nginx-ingress-controller          LoadBalancer  10.254.51.167   <pending>    80:31220/TCP,443:30851/TCP  0s
    nginx-ingress-controller-metrics  ClusterIP     10.254.70.26    <none>       9913/TCP                    0s
    nginx-ingress-controller-stats    ClusterIP     10.254.250.132  <none>       18080/TCP                   0s
    nginx-ingress-default-backend     ClusterIP     10.254.142.218  <none>       80/TCP                      0s
    
    ==> v1/ServiceAccount
    NAME           SECRETS  AGE
    nginx-ingress  1        0s
    
    ==> v1beta1/ClusterRole
    NAME           AGE
    nginx-ingress  0s
    
    ==> v1beta1/ClusterRoleBinding
    NAME           AGE
    nginx-ingress  0s
    
    ==> v1beta1/DaemonSet
    NAME                      DESIRED  CURRENT  READY  UP-TO-DATE  AVAILABLE  NODE SELECTOR  AGE
    nginx-ingress-controller  3        3        0      3           0          <none>         0s
    
    ==> v1beta1/Deployment
    NAME                           READY  UP-TO-DATE  AVAILABLE  AGE
    nginx-ingress-default-backend  0/1    1           0          0s
    
    ==> v1beta1/Role
    NAME           AGE
    nginx-ingress  0s
    
    ==> v1beta1/RoleBinding
    NAME           AGE
    nginx-ingress  0s
    
    
    NOTES:
    The nginx-ingress controller has been installed.
    It may take a few minutes for the LoadBalancer IP to be available.
    You can watch the status by running 'kubectl --namespace kube-system get services -o wide -w nginx-ingress-controller'
    
    An example Ingress that makes use of the controller:
    
      apiVersion: extensions/v1beta1
      kind: Ingress
      metadata:
        annotations:
          kubernetes.io/ingress.class: nginx
        name: example
        namespace: foo
      spec:
        rules:
          - host: www.example.com
            http:
              paths:
                - backend:
                    serviceName: exampleService
                    servicePort: 80
                  path: /
        # This section is only required if TLS is to be enabled for the Ingress
        tls:
            - hosts:
                - www.example.com
              secretName: example-tls
    
    If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
    
      apiVersion: v1
      kind: Secret
      metadata:
        name: example-tls
        namespace: foo
      data:
        tls.crt: <base64 encoded cert>
        tls.key: <base64 encoded key>
      type: kubernetes.io/tls
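
    Rather than base64-encoding the certificate and key by hand, kubectl can build that Secret directly from the files (paths are illustrative):

    kubectl create secret tls example-tls \
      --cert=www.example.com.pem \
      --key=www.example.com-key.pem \
      --namespace foo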

    The installation above used hostNetwork, so each node is now listening on ports 80/443:

    [root@cc-k8s02 ~]# netstat -nltp|grep -E "(:80|:443)"
    tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      13676/nginx: master 
    tcp        0      0 0.0.0.0:443             0.0.0.0:*               LISTEN      13676/nginx: master

    The controller.stats.enabled=true parameter enabled the nginx_status page, so curl can be used to verify connectivity to nginx-ingress:

    [root@cc-k8s02 ~]# curl localhost/nginx_status
    Active connections: 1 
    server accepts handled requests
     1395 1395 1392 
    Reading: 0 Writing: 1 Waiting: 0
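
    A request for a host that no Ingress rule matches should land on the default backend, which gives another quick end-to-end check (the Host header value is arbitrary):

    [root@cc-k8s02 ~]# curl -H "Host: nonexistent.example.com" localhost/
    default backend - 404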

    4. Delete the nginx-ingress deployment with helm

    Without --purge, a release named nginx-ingress still exists, just in the DELETED state, and a later deployment under the same name will fail. With --purge the release is removed completely.

    [root@cc-k8s01 ~]# helm delete --tls  nginx-ingress --purge
    release "nginx-ingress" deleted
  • Original article: https://www.cnblogs.com/jcici/p/11126581.html