• Installing Kubernetes (K8s) on Ubuntu 16.04


    I. Run the following steps on both the master and the node

    1. Disable swap and the firewall

    1. vim /etc/fstab             # or simply run: swapoff -a
       Comment out the line containing swap. swapoff -a only lasts until the next reboot; commenting out the fstab entry makes the change permanent (see the sketch after this list).
    2. systemctl stop ufw 
    3. systemctl disable ufw
    root@master:/etc# systemctl stop ufw && systemctl disable ufw
    Synchronizing state of ufw.service with SysV init with /lib/systemd/systemd-sysv-install...
    Executing /lib/systemd/systemd-sysv-install disable ufw
    insserv: warning: current start runlevel(s) (empty) of script `ufw' overrides LSB defaults (S).
    insserv: warning: current stop runlevel(s) (1 S) of script `ufw' overrides LSB defaults (1).
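
    A minimal sketch of the swap step as one snippet (the sed pattern is an assumption about your fstab layout; keep the backup):

        swapoff -a                             # disable swap for the running system
        cp /etc/fstab /etc/fstab.bak           # back up before editing
        sed -i '/\sswap\s/s/^/#/' /etc/fstab   # comment out swap entries so the change survives reboots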

    2. Add the Kubernetes apt source and update

    1. echo "deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list

    root@master:~# cat /etc/apt/sources.list.d/kubernetes.list
    deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main

    2. apt-get update

    root@master:~# apt-get update
    Hit:1 http://mirrors.aliyun.com/ubuntu xenial InRelease
    Hit:2 http://mirrors.aliyun.com/ubuntu xenial-updates InRelease
    Hit:3 http://mirrors.aliyun.com/ubuntu xenial-backports InRelease
    Hit:4 http://mirrors.aliyun.com/ubuntu xenial-security InRelease
    Get:5 http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease [9,383 B]
    Ign:5 http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease
    Fetched 9,383 B in 1s (6,443 B/s)
    Reading package lists... Done
    W: GPG error: http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 6A030B21BA07F4FB NO_PUBKEY 8B57C5C2836F4BEB
    W: The repository 'http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease' is not signed.
    N: Data from such a repository can't be authenticated and is therefore potentially dangerous to use.
    N: See apt-secure(8) manpage for repository creation and user configuration details.

    3. Fetch the repo key; BA07F4FB is the last eight hex digits of the missing key reported by apt-get update above

    gpg --keyserver keyserver.ubuntu.com --recv-keys BA07F4FB

    4. gpg --export --armor BA07F4FB | sudo apt-key add -

    5. apt-get update   (it should now complete without the NO_PUBKEY warning)
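
    If the update still complains about the second key from the error above (8B57C5C2836F4BEB), the same two steps work with its last eight digits:

        gpg --keyserver keyserver.ubuntu.com --recv-keys 836F4BEB
        gpg --export --armor 836F4BEB | sudo apt-key add -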

    3. Install Docker

    apt-get install docker.io -y

    4. Enable and start Docker

    systemctl enable docker
    systemctl start docker
    systemctl status docker
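
    A quick sanity check that the daemon is actually up (any CLI call that reaches the daemon will do):

        docker version                            # prints both client and server sections when the daemon is running
        docker info | grep -i 'server version'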

    5. Install the Kubernetes components; this walkthrough installs 1.18.12

    1. root@master:~# apt-cache madison kubelet|grep 1.18.12
       kubelet | 1.18.12-00 | http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
       root@master:~# apt-cache madison kubeadm|grep 1.18.12
       kubeadm | 1.18.12-00 | http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
       root@master:~# apt-cache madison kubectl|grep 1.18.12
       kubectl | 1.18.12-00 | http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
       root@master:~# 
    2. apt-get install -y kubelet=1.18.12-00 kubeadm=1.18.12-00 kubectl=1.18.12-00
    3. systemctl enable kubelet
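
    Optionally, pin the three packages so a routine apt-get upgrade cannot move the cluster to a different version (standard practice, not in the original steps):

        apt-mark hold kubelet kubeadm kubectl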

    6. Check whether Docker's cgroup driver is cgroupfs or systemd

    Docker's cgroup driver needs to be changed to the one Kubernetes recommends, which here is systemd. Check the current driver:
    root@node1:/etc/apt# docker info | grep -i cgroup
    WARNING: No swap limit support
    Cgroup Driver: cgroupfs

    1. vim /etc/docker/daemon.json
    
    root@master:/etc/docker# cat daemon.json 
    {
     "exec-opts":["native.cgroupdriver=systemd"]
    }
    
    2. systemctl restart docker
    3. systemctl status docker
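
    After the restart, verify the change with the same command as above; it should now report systemd:

        docker info | grep -i cgroup
        # Cgroup Driver: systemd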

    II. Run the following steps on the master

    1. Define the environment variable

    export KUBECONFIG=/etc/kubernetes/admin.conf
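
    export only lasts for the current shell; to make it persistent, append it to the shell profile (assuming bash):

        echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bashrc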

    2. Reload systemd and restart the kubelet

    systemctl daemon-reload
    systemctl restart kubelet

    3. Manually pull the Docker images and retag them to the default names listed by kubeadm config images list

    1. kubeadm config images list
    root@master:/etc/kubernetes# kubeadm config images list
    I0120 19:08:54.635926 16047 version.go:252] remote version is much newer: v1.20.2; falling back to: stable-1.18
    W0120 19:09:02.628767 16047 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    k8s.gcr.io/kube-apiserver:v1.18.15
    k8s.gcr.io/kube-controller-manager:v1.18.15
    k8s.gcr.io/kube-scheduler:v1.18.15
    k8s.gcr.io/kube-proxy:v1.18.15
    k8s.gcr.io/pause:3.2
    k8s.gcr.io/etcd:3.4.3-0
    k8s.gcr.io/coredns:1.6.7

    2. docker pull <image>

    Note that the list above fell back to v1.18.15 (kubeadm queried the latest stable-1.18 release), but since the cluster will be initialized with --kubernetes-version=v1.18.12, pull the v1.18.12 tags:

      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.12
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.12
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.12
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.12
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7

    3. docker images
    root@master:/etc/kubernetes# docker images 
    REPOSITORY                                                                    TAG                 IMAGE ID            CREATED             SIZE
    registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.18.12            6947b0d99ceb        6 days ago          117MB
    registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.18.12            21e89bb12d33        6 days ago          173MB
    registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.18.12            4b3915bbba95        6 days ago          162MB
    registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.18.12            db6167a559ba        6 days ago          95.3MB
    registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.2                 80d28bedfe5d        11 months ago       683kB
    registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   1.6.7               67da37a9a360        11 months ago       43.8MB
    registry.cn-hangzhou.aliyuncs.com/google_containers/etcd               
    
    4. docker tag
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.12 k8s.gcr.io/kube-proxy:v1.18.12
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.12 k8s.gcr.io/kube-apiserver:v1.18.12
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.12 k8s.gcr.io/kube-controller-manager:v1.18.12
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.12 k8s.gcr.io/kube-scheduler:v1.18.12
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
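
    The pulls and retags in steps 2 and 4 can also be scripted as one loop (a sketch, hard-coded to the versions used above):

        MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
        for img in kube-apiserver:v1.18.12 kube-controller-manager:v1.18.12 \
                   kube-scheduler:v1.18.12 kube-proxy:v1.18.12 \
                   pause:3.2 etcd:3.4.3-0 coredns:1.6.7; do
            docker pull $MIRROR/$img                   # pull from the Aliyun mirror
            docker tag  $MIRROR/$img k8s.gcr.io/$img   # retag to the name kubeadm expects
        done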

    4. If kubeadm init reports the following warning, change Docker's default cgroup driver from cgroupfs to systemd

    root@master:~# kubeadm init --kubernetes-version=v1.18.12 --pod-network-cidr 200.0.0.0/16
    W0120 19:04:40.730832   14876 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    [init] Using Kubernetes version: v1.18.12
    [preflight] Running pre-flight checks
            [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'

    The fix is the same as step 6 in Part I: set "exec-opts":["native.cgroupdriver=systemd"] in /etc/docker/daemon.json, then systemctl restart docker.

    5. Initialize the cluster

    kubeadm init --kubernetes-version=v1.18.12 --pod-network-cidr 200.0.0.0/16 --apiserver-advertise-address=100.0.0.5

    --apiserver-advertise-address pins the address the API server advertises; without it, kubeadm uses the interface of the default route.
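
    As the preflight output below notes, kubeadm can also pre-pull whatever it still needs (already covered here by the manual pull-and-retag above):

        kubeadm config images pull --kubernetes-version=v1.18.12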

    root@master:/etc/kubernetes# kubeadm init --kubernetes-version=v1.18.12 --pod-network-cidr 200.0.0.0/16
    W0120 19:36:21.878161   22147 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    [init] Using Kubernetes version: v1.18.12
    [preflight] Running pre-flight checks
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 100.0.0.5]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [100.0.0.5 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [100.0.0.5 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    W0120 19:36:25.214489   22147 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    W0120 19:36:25.216005   22147 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 22.502951 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Skipping phase. Please see --upload-certs
    [mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
    [mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: s3hig9.825tf3vs5eezmzny
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy

    Your Kubernetes control-plane has initialized successfully!

    To start using your cluster, you need to run the following as a regular user:

      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config

    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/

    Then you can join any number of worker nodes by running the following on each as root:

    kubeadm join 100.0.0.5:6443 --token s3hig9.825tf3vs5eezmzny \
        --discovery-token-ca-cert-hash sha256:84b93bb60dddec95b8df18bb89ad83d1f56c4a3bdc53601871a6a2d289f379f0
    root@master:/etc/kubernetes#

    6. Set up kubectl access for the current user, as instructed by the init output

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config

    7. Install Calico

    kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
    kubectl apply -f https://docs.projectcalico.org/v3.8/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

    After applying, kubectl get pods -A shows calico-kube-controllers and calico-node failing to start. kubectl describe pod -n kube-system <pod> reveals the images could not be pulled; the following two are needed:

    docker pull calico/cni:v3.8.9

    docker pull calico/pod2daemon-flexvol:v3.8.9

    Once these two images finish downloading, the Calico pods come up, the CoreDNS pods start, and all nodes report Ready.
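
    To watch the pods flip to Running as the images land:

        kubectl get pods -n kube-system -w    # -w streams updates; Ctrl-C to stop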

    8. Check the cluster status (k below is an alias for kubectl)

    root@master:~# k get pods -A
    NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
    kube-system   calico-kube-controllers-75d555c48-7mhx4   1/1     Running   1          145m
    kube-system   calico-node-jdtkq                         1/1     Running   1          145m
    kube-system   coredns-66bff467f8-6qtkk                  1/1     Running   1          145m
    kube-system   coredns-66bff467f8-j6zzw                  1/1     Running   1          145m
    kube-system   etcd-master                               1/1     Running   4          145m
    kube-system   kube-apiserver-master                     1/1     Running   4          145m
    kube-system   kube-controller-manager-master            1/1     Running   4          145m
    kube-system   kube-proxy-j64zh                          1/1     Running   5          145m
    kube-system   kube-scheduler-master                     1/1     Running   4          145m
    root@master:~# k get nodes -A
    NAME     STATUS   ROLES    AGE    VERSION
    master   Ready    master   146m   v1.18.12

    III. Join the worker node to the cluster

    1. Get the bootstrap token
    root@master:~# kubeadm token list
    TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
    akqqzd.hxr9liuuz7naqglb   21h         2021-01-21T20:12:04+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
    2. Get the SHA-256 hash of the CA certificate's public key
    root@master:~# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
    b63b4f6abf6d8afcd26a51de7785a30e529d69f73698c6e2f2b0925b6f1d6c9a
    
    3. kubeadm join 100.0.0.5:6443 --token akqqzd.hxr9liuuz7naqglb \
    >     --discovery-token-ca-cert-hash sha256:b63b4f6abf6d8afcd26a51de7785a30e529d69f73698c6e2f2b0925b6f1d6c9a
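       (Equivalently, steps 1-3 collapse into a single command on the master that mints a fresh token and prints the complete join line:

           kubeadm token create --print-join-command )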
    4. On the master, check the cluster status
    kubectl get nodes -A
    kubectl get pods -A
    5. Adjust the node's role (the ROLES column is driven by node-role.kubernetes.io/<role> labels)
    Add the role label:

      root@master:~# kubectl label node node1 node-role.kubernetes.io/node=
      node/node1 labeled
      root@master:~# kubectl get nodes -A -o wide
      NAME     STATUS   ROLES    AGE    VERSION    INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
      master   Ready    master   3h7m   v1.18.12   100.0.0.5     <none>        Ubuntu 16.04.6 LTS   4.15.0-45-generic   docker://18.9.7
      node1    Ready    node     26m    v1.18.12   100.0.0.6     <none>        Ubuntu 16.04.6 LTS   4.15.0-45-generic   docker://18.9.7

      Remove the role label:
      root@master:~# kubectl label node node1 node-role.kubernetes.io/node-
      node/node1 labeled
      root@master:~# kubectl get nodes -A -o wide
      NAME     STATUS   ROLES    AGE    VERSION    INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
      master   Ready    master   3h8m   v1.18.12   100.0.0.5     <none>        Ubuntu 16.04.6 LTS   4.15.0-45-generic   docker://18.9.7
      node1    Ready    <none>   26m    v1.18.12   100.0.0.6     <none>        Ubuntu 16.04.6 LTS   4.15.0-45-generic   docker://18.9.7

    IV. FAQ

    1. Resetting the cluster configuration (the cni0/flannel.1 lines below only apply if those interfaces exist, e.g. with a flannel CNI; with Calico they can be skipped)

    kubeadm reset -f

    systemctl stop kubelet
    systemctl stop docker
    rm -rf /var/lib/cni/
    rm -rf /var/lib/kubelet/*
    rm -rf /etc/cni/
    rm -rf $HOME/.kube
    ifconfig cni0 down
    ifconfig flannel.1 down
    ifconfig docker0 down
    ip link delete cni0
    ip link delete flannel.1
    systemctl restart kubelet
    systemctl restart docker

    2. After a node joins, kubectl get pods shows kube-proxy and the Calico pods never coming up

    kubectl describe on the pods shows the image pulls failing, so manually transfer the pause, kube-proxy, and Calico images from the master to the node.
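
    A sketch of that transfer with docker save / docker load (the exact image list and the node1 hostname are assumptions; match whatever kubectl describe reports as missing):

        # On the master: bundle the images the node failed to pull
        docker save -o /tmp/node-images.tar \
            k8s.gcr.io/pause:3.2 \
            k8s.gcr.io/kube-proxy:v1.18.12 \
            calico/cni:v3.8.9 \
            calico/node:v3.8.9 \
            calico/pod2daemon-flexvol:v3.8.9
        scp /tmp/node-images.tar node1:/tmp/

        # On the node: import the bundle
        docker load -i /tmp/node-images.tar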

    end

  • Original: https://www.cnblogs.com/reatual/p/14304675.html