• 02 - Installing a Kubernetes 1.13.4 Cluster with kubeadm


    K8S cluster deployment layout:

    10.20.164.246  master (k8s-master)
    10.20.164.247  node   (k8s-node1)
    10.20.164.248  node   (k8s-node2)

    # Set a distinct hostname on each of the three hosts

    hostnamectl set-hostname k8s-master
    hostnamectl set-hostname k8s-node1
    hostnamectl set-hostname k8s-node2

    # Add the entries to /etc/hosts (on all three hosts)

    cat >>/etc/hosts<<EOF
    10.20.164.246 k8s-master
    10.20.164.247 k8s-node1
    10.20.164.248 k8s-node2
    EOF

    # Disable the firewall and SELinux:

    systemctl stop firewalld
    systemctl disable firewalld
    setenforce 0
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux

    # Create /etc/sysctl.d/k8s.conf with the required kernel parameters

    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    vm.swappiness = 0
    EOF

    # Load the br_netfilter module and apply the settings

    modprobe br_netfilter
    sysctl -p /etc/sysctl.d/k8s.conf
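
    # To double-check, the values can be read back; all three should report 1 (a quick verification step, not in the original post):

    sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward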

    # 1.2 Prerequisites for enabling IPVS mode in kube-proxy:

    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
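
    # Note: on kernels 4.19 and newer, nf_conntrack_ipv4 was merged into nf_conntrack; if that modprobe fails, load nf_conntrack instead (assumption: you are on a newer kernel than the CentOS 7 default):

    modprobe -- nf_conntrack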

    # Disable swap on all nodes
    # Comment out the swap entry in /etc/fstab, e.g.:

    #UUID=4b4ff174-fdfb-4c19-b002-fc234ad15683 swap swap defaults,noatime 0 0

    # Then turn swap off immediately:

    swapoff -a
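
    # Verify that swap is fully off; the Swap line should be all zeros (a quick check, not in the original post):

    free -m | grep -i swap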

    # Install the ipset package. To make it easy to inspect the IPVS proxy rules, also install the management tool ipvsadm:

    yum install -y ipset ipvsadm

    # 1.3 Install Docker (all nodes)

    # Add the Docker yum repo on all nodes. The official repo is slow from within China, so use the Aliyun mirror http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

    sudo yum install -y yum-utils device-mapper-persistent-data lvm2
    sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

    # List the available Docker versions:

    yum list docker-ce.x86_64 --showduplicates |sort -r

    # Install Docker 18.06.1 on every node
    # Refresh the yum cache first

    sudo yum makecache fast
    sudo yum install -y --setopt=obsoletes=0 \
        docker-ce-18.06.1.ce-3.el7

    # Start Docker and enable it at boot

    sudo systemctl start docker
    sudo systemctl enable docker

    # Confirm that the default policy of the FORWARD chain in the iptables filter table is ACCEPT:

    iptables -nvL
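
    # If the FORWARD policy shows DROP (Docker 1.13+ sets it that way on some systems), cross-node Pod traffic will be dropped; switch it to ACCEPT (a minimal fix, assuming firewalld stays disabled as above):

    iptables -P FORWARD ACCEPT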

    # Install kubeadm and kubelet
    # On every node, install them from the Aliyun mirror:

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF

    # Check that the mirror URL is reachable:

    curl https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64

    # Run the install:

    sudo yum makecache fast
    sudo yum install -y kubelet kubeadm kubectl

    # Initialize the cluster with kubeadm init
    # Enable the kubelet service at boot on every node:

    sudo systemctl enable kubelet

    # Check the installed version:

    kubelet --version
    Kubernetes v1.13.4

    # To cope with blocked access to k8s.gcr.io from within China, pull the required images from a mirror in advance and re-tag them:
    # Pull the needed images from Docker Hub

    sudo docker pull mirrorgooglecontainers/kube-apiserver-amd64:v1.13.4
    sudo docker pull mirrorgooglecontainers/kube-controller-manager-amd64:v1.13.4
    sudo docker pull mirrorgooglecontainers/kube-scheduler-amd64:v1.13.4
    sudo docker pull mirrorgooglecontainers/kube-proxy-amd64:v1.13.4
    sudo docker pull mirrorgooglecontainers/pause:3.1
    sudo docker pull mirrorgooglecontainers/etcd-amd64:3.2.24
    sudo docker pull coredns/coredns:1.2.6

    # Re-tag the Docker Hub images as k8s.gcr.io

    sudo docker tag docker.io/mirrorgooglecontainers/kube-apiserver-amd64:v1.13.4 k8s.gcr.io/kube-apiserver:v1.13.4
    sudo docker tag docker.io/mirrorgooglecontainers/kube-controller-manager-amd64:v1.13.4 k8s.gcr.io/kube-controller-manager:v1.13.4
    sudo docker tag docker.io/mirrorgooglecontainers/kube-scheduler-amd64:v1.13.4 k8s.gcr.io/kube-scheduler:v1.13.4
    sudo docker tag docker.io/mirrorgooglecontainers/kube-proxy-amd64:v1.13.4 k8s.gcr.io/kube-proxy:v1.13.4
    sudo docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
    sudo docker tag docker.io/mirrorgooglecontainers/etcd-amd64:3.2.24 k8s.gcr.io/etcd:3.2.24
    sudo docker tag docker.io/coredns/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6

    # Remove the now-redundant original tags

    sudo docker rmi mirrorgooglecontainers/kube-apiserver-amd64:v1.13.4
    sudo docker rmi mirrorgooglecontainers/kube-controller-manager-amd64:v1.13.4
    sudo docker rmi mirrorgooglecontainers/kube-scheduler-amd64:v1.13.4
    sudo docker rmi mirrorgooglecontainers/kube-proxy-amd64:v1.13.4
    sudo docker rmi mirrorgooglecontainers/pause:3.1
    sudo docker rmi mirrorgooglecontainers/etcd-amd64:3.2.24
    sudo docker rmi coredns/coredns:1.2.6
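
    # The pull/tag/rmi sequence above can also be scripted as a loop; a minimal sketch covering the same image list:

    #!/bin/bash
    images=(
        kube-apiserver-amd64:v1.13.4
        kube-controller-manager-amd64:v1.13.4
        kube-scheduler-amd64:v1.13.4
        kube-proxy-amd64:v1.13.4
        pause:3.1
        etcd-amd64:3.2.24
    )
    for img in "${images[@]}"; do
        # drop the -amd64 suffix to form the k8s.gcr.io tag
        sudo docker pull mirrorgooglecontainers/$img
        sudo docker tag mirrorgooglecontainers/$img k8s.gcr.io/${img/-amd64/}
        sudo docker rmi mirrorgooglecontainers/$img
    done
    # coredns lives in its own Docker Hub namespace
    sudo docker pull coredns/coredns:1.2.6
    sudo docker tag coredns/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6
    sudo docker rmi coredns/coredns:1.2.6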


    # Run the initialization (on the master):

    kubeadm init \
        --kubernetes-version=v1.13.4 \
        --pod-network-cidr=10.244.0.0/16 \
        --apiserver-advertise-address=10.20.164.246 \
        --ignore-preflight-errors=Swap

    # On success, the output ends with a join command like:

    kubeadm join 10.20.164.246:6443 --token tyk4ka.lxi9wq43vt2iwwku --discovery-token-ca-cert-hash sha256:62ffe320e63bb66f09b1fcc8bb8e492d221f7bd1e9f56a1810f1714343c6f9b7

    # Run the following to configure kubectl access to the cluster for a regular user:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
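
    # Alternatively, for the root user it is enough to point KUBECONFIG at the admin config directly:

    export KUBECONFIG=/etc/kubernetes/admin.conf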

    # Check the cluster status and confirm that every component reports Healthy.

    kubectl get cs
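
    # Typical healthy output looks like this (illustrative):

    NAME                 STATUS    MESSAGE              ERROR
    scheduler            Healthy   ok
    controller-manager   Healthy   ok
    etcd-0               Healthy   {"health": "true"}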

    # If initialization runs into trouble, clean up with:

    kubeadm reset
    ifconfig cni0 down
    ip link delete cni0
    ifconfig flannel.1 down
    ip link delete flannel.1
    rm -rf /var/lib/cni/

    # Install the Pod network
    # Next, install the flannel network add-on:

    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

    # Make sure every Pod is in the Running state

    kubectl get pod --all-namespaces -o wide

    # The master node should now show Ready:

    kubectl get nodes
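
    # By default the master is tainted and will not run ordinary Pods. On a small test cluster the taint can be removed (optional, not in the original post):

    kubectl taint nodes k8s-master node-role.kubernetes.io/master-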

    # Join the worker nodes to the cluster (run on k8s-node1 and k8s-node2):

    kubeadm join 10.20.164.246:6443 --token tyk4ka.lxi9wq43vt2iwwku --discovery-token-ca-cert-hash sha256:62ffe320e63bb66f09b1fcc8bb8e492d221f7bd1e9f56a1810f1714343c6f9b7 --ignore-preflight-errors=Swap
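
    # Join tokens expire after 24 hours by default. If the join fails with an invalid-token error, generate a fresh join command on the master:

    kubeadm token create --print-join-command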

    # Remove a node: run the following on the master

    kubectl drain k8s-node1 --delete-local-data --force --ignore-daemonsets
    kubectl get nodes
    kubectl delete node k8s-node1

    # Then run on the k8s-node1 node itself:

    sudo kubeadm reset

    # Check the node status on the master again.
    # If a node stays NotReady, restart the Docker and kubelet services on it:

    sudo systemctl restart docker
    sudo systemctl restart kubelet
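
    # If a restart does not help, the kubelet journal usually reveals the cause (image pull failures, cgroup driver mismatch, and so on):

    journalctl -u kubelet -f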

    # Make sure all Pods are back in the Running state

    kubectl get pod --all-namespaces -o wide

    # Inspect the Pods in kube-system.

    kubectl get pod -n kube-system

    # Core component status:

    kubectl get cs

    # Enable IPVS mode in kube-proxy
    # Edit config.conf in the kube-system/kube-proxy ConfigMap and set mode: "ipvs":

    kubectl edit cm kube-proxy -n kube-system
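
    # Inside the editor, locate the config.conf data and change the mode field; the relevant fragment should end up as follows (assumption: the rest of the ConfigMap is left untouched):

    kind: KubeProxyConfiguration
    mode: "ipvs"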

    # Then restart the kube-proxy Pods on every node. On the master, run:

    kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
    kubectl get pod -n kube-system | grep kube-proxy
    kubectl logs kube-proxy-XXXX -n kube-system

    # If the log prints "Using ipvs Proxier", IPVS mode is enabled.
    --------------------------------------------------------------------------------------------------------------------------------------------------

    [hejianping@k8s-master ~]$ kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
    pod "kube-proxy-459xt" deleted
    pod "kube-proxy-84mbq" deleted
    [hejianping@k8s-master ~]$ kubectl get pod -n kube-system | grep kube-proxy
    kube-proxy-gwbfm 1/1 Running 0 8s
    kube-proxy-xdxt5 1/1 Running 0 11s
    [hejianping@k8s-master ~]$ kubectl logs kube-proxy-4vn59 -n kube-system
    [hejianping@k8s-master ~]$ kubectl logs kube-proxy-gwbfm -n kube-system 
    I0320 01:20:00.818329 1 server_others.go:189] Using ipvs Proxier.
    W0320 01:20:00.818837 1 proxier.go:381] IPVS scheduler not specified, use rr by default
    I0320 01:20:00.818937 1 server_others.go:216] Tearing down inactive rules.
    I0320 01:20:00.874237 1 server.go:483] Version: v1.13.4
    I0320 01:20:00.890524 1 conntrack.go:52] Setting nf_conntrack_max to 786432
    I0320 01:20:00.890827 1 config.go:102] Starting endpoints config controller
    I0320 01:20:00.890848 1 config.go:202] Starting service config controller
    I0320 01:20:00.890876 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
    I0320 01:20:00.890850 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
    I0320 01:20:00.991006 1 controller_utils.go:1034] Caches are synced for endpoints config controller
    I0320 01:20:00.991009 1 controller_utils.go:1034] Caches are synced for service config controller
    ---------------------------------------------------------------------------------------------------------------------------------------------------

    # Inspect the IPVS proxy rules:

    sudo ipvsadm -ln
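
    # Output along these lines confirms that the cluster service VIPs are handled by IPVS with the default rr scheduler (addresses illustrative):

    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  10.96.0.1:443 rr
      -> 10.20.164.246:6443           Masq    1      0          0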

    Common kubeadm commands:
    kubeadm init      bootstrap a master node;
    kubeadm join      bootstrap a worker node and join it to the master;
    kubeadm upgrade   upgrade the cluster to a newer version;
    kubeadm config    of limited use since 1.8.0; still handy for viewing the configuration;
    kubeadm token     manage tokens for kubeadm join;
    kubeadm reset     revert the changes made by kubeadm init or kubeadm join;
    kubeadm version   print version information;
    kubeadm alpha     preview commands covering alpha features.

    If the nodes stay NotReady for a long time, run kubectl get pod -n kube-system to inspect the Pod status; that usually reveals the problem, such as Docker needing a restart or the flannel image failing to download.
