• k8s Deployment


    Environment initialization (all nodes)

      1. Configure hostnames

    hostnamectl set-hostname master    # on the master node
    hostnamectl set-hostname node      # on the worker node

      

      2. Configure /etc/hosts

    127.0.0.1        localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1              localhost localhost.localdomain localhost6 localhost6.localdomain6
    
    
    192.168.1.11     master
    192.168.1.12     node
          

      3. Disable the firewall, SELinux, and swap

    # Stop and disable the firewall
    systemctl stop firewalld
    systemctl disable firewalld
    
    
    # Disable SELinux
    setenforce 0
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
    sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
    
    
    # Disable swap (kubelet refuses to start while swap is enabled)
    swapoff -a
    sed -i 's/.*swap.*/#&/' /etc/fstab
    
    # Load the br_netfilter module (required for the bridge sysctls below)
    modprobe br_netfilter
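
    A module loaded with modprobe does not survive the reboot recommended in the final check step; a minimal sketch to load it at every boot via systemd:

    # Load br_netfilter automatically at boot (systemd-modules-load)
    echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf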

      4. Configure kernel parameters in /etc/sysctl.d/k8s.conf

    cat <<EOF > /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    # Apply the file
    sysctl -p /etc/sysctl.d/k8s.conf

      

      5. Raise Linux resource limits: increase the max open files for ulimit and for services managed by systemd

    echo "* soft nofile 655360" >> /etc/security/limits.conf
    echo "* hard nofile 655360" >> /etc/security/limits.conf
    echo "* soft nproc 655360" >> /etc/security/limits.conf
    echo "* hard nproc 655360" >> /etc/security/limits.conf
    echo "* soft memlock unlimited" >> /etc/security/limits.conf
    echo "* hard memlock unlimited" >> /etc/security/limits.conf
    echo "DefaultLimitNOFILE=1024000" >> /etc/systemd/system.conf
    echo "DefaultLimitNPROC=1024000" >> /etc/systemd/system.conf

      6. Configure domestic mirrors: the Tencent base repo, the EPEL repo, and a Kubernetes repo

    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
    
    
    wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
    
    
    yum clean all && yum makecache
    
    # Configure a domestic Kubernetes repo (Aliyun mirror)
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF

       

      7. Install dependency packages

    yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp bash-completion \
        yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools vim libtool-ltdl

      8. Configure time synchronization (required on all nodes)

    yum install -y chrony
    systemctl enable chronyd.service && systemctl start chronyd.service 
    systemctl status chronyd.service
    chronyc sources

      9. Verify the initialization

        - Reboot: after completing all the steps above, it is best to reboot each node once

        - ping each node's hostname to verify name resolution works
        - ssh to the other hostnames to verify that passwordless (key-based) login works
        - Run date on each node to verify the time is correct
        - Run ulimit -Hn to verify the hard max-open-files limit is 655360
        - Run cat /etc/sysconfig/selinux | grep disabled to verify SELinux is disabled on every node; a small script automating the local checks is sketched below
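
    A minimal sketch automating the per-node checks above, assuming the hostnames master and node used in this guide:

    #!/bin/bash
    # Post-initialization sanity checks; run on each node after the reboot
    echo "date: $(date)"
    echo "ulimit -Hn: $(ulimit -Hn)"            # expect 655360
    grep '^SELINUX=' /etc/sysconfig/selinux     # expect SELINUX=disabled
    [ -z "$(swapon -s)" ] && echo "swap: off" || echo "swap: STILL ENABLED"
    for h in master node; do
        ping -c 1 -W 1 "$h" >/dev/null 2>&1 && echo "$h: reachable" || echo "$h: NOT reachable"
    done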

    Install Docker (required on all nodes)

      1. Set up the Docker yum repo

    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

      2. Install Docker

    # List available Docker versions
    yum list docker-ce --showduplicates | sort -r
    
    # Install the pinned version 18.06.1
    yum install -y docker-ce-18.06.1.ce-3.el7
    systemctl restart docker
    # Configure a registry mirror and the Docker data directory
    tee /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://q2hy3fzi.mirror.aliyuncs.com"],
      "graph": "/tol/docker-data"
    }
    EOF
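
    Note: the "graph" key still works on Docker 18.06 but is deprecated; on newer Docker releases the equivalent key is "data-root". A sketch of the same file with the newer key:

    {
      "registry-mirrors": ["https://q2hy3fzi.mirror.aliyuncs.com"],
      "data-root": "/tol/docker-data"
    }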

      

      3. Start Docker

    systemctl daemon-reload 
    systemctl restart docker
    systemctl enable docker
    systemctl status docker
    
    # docker --version

      Install kubeadm, kubelet, and kubectl (all nodes)

      • kubeadm: the command used to bootstrap the cluster
      • kubelet: the component that runs on every machine in the cluster and manages the lifecycle of pods and containers
      • kubectl: the cluster management CLI

      Install the tools

    yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
    systemctl enable kubelet && systemctl start kubelet
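
    Note: the command above installs the latest packages from the repo, which may be newer than the v1.13.0 cluster pinned below. To keep the tools in lockstep with the cluster version, the packages can be pinned explicitly (the version suffixes here are an assumption matching the target release):

    yum install -y kubelet-1.13.0 kubeadm-1.13.0 kubectl-1.13.0 --disableexcludes=kubernetes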

    Prepare the images

      1. Get the list of required images

    # List the images the installation depends on
    kubeadm config images list
    
    
    # Generate a default kubeadm.conf file
    kubeadm config print init-defaults > kubeadm.conf

      

      2. Pull the images through a domestic mirror (to bypass the blocked registry)

    sed -i "s/imageRepository: .*/imageRepository: registry.aliyuncs.com/google_containers/g" kubeadm.conf

      

      3. Pin the Kubernetes version for kubeadm

    sed -i "s/kubernetesVersion: .*/kubernetesVersion: v1.13.0/g" kubeadm.conf

      

      4. Pull the required images

    kubeadm config images pull --config kubeadm.conf
    
    docker images

      

      5. docker tag the images to their k8s.gcr.io names

    docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.13.0 k8s.gcr.io/kube-apiserver:v1.13.0
    docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.13.0 k8s.gcr.io/kube-controller-manager:v1.13.0
    docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.13.0 k8s.gcr.io/kube-scheduler:v1.13.0
    docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0
    docker tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
    docker tag registry.aliyuncs.com/google_containers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
    docker tag registry.aliyuncs.com/google_containers/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6

      

      6. docker rmi the mirror-tagged images

    docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.13.0
    docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.13.0
    docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.13.0
    docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0
    docker rmi registry.aliyuncs.com/google_containers/pause:3.1
    docker rmi registry.aliyuncs.com/google_containers/etcd:3.2.24
    docker rmi registry.aliyuncs.com/google_containers/coredns:1.2.6
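
    The pull/tag/rmi sequence above can also be collapsed into a single loop. A minimal sketch, assuming the same image list and versions:

    #!/bin/bash
    # Pull each image from the Aliyun mirror, retag it to its k8s.gcr.io name,
    # then drop the mirror tag
    MIRROR=registry.aliyuncs.com/google_containers
    for img in kube-apiserver:v1.13.0 kube-controller-manager:v1.13.0 \
               kube-scheduler:v1.13.0 kube-proxy:v1.13.0 \
               pause:3.1 etcd:3.2.24 coredns:1.2.6; do
        docker pull "$MIRROR/$img"
        docker tag "$MIRROR/$img" "k8s.gcr.io/$img"
        docker rmi "$MIRROR/$img"
    done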

    Deploy the master node

      1. Initialize the master with kubeadm init

    # Define the pod CIDR as 172.22.0.0/16; the API server advertise address is the master's own IP
    kubeadm init --kubernetes-version=v1.13.0 --pod-network-cidr=172.22.0.0/16 --apiserver-advertise-address=192.168.1.11
    ls /etc/kubernetes/

    # If the init fails, reset and re-run it with the commands below
    kubeadm reset
    kubeadm init --kubernetes-version=v1.13.0 --pod-network-cidr=172.22.0.0/16 --apiserver-advertise-address=192.168.1.11
    # Record the join command printed at the end of the init output; it is needed to join nodes later
    kubeadm join 192.168.1.11:6443 --token iazwtj.v3ajyq9kyqftg3et --discovery-token-ca-cert-hash sha256:27aaefd2afc4e75fd34c31365abd3a7357bb4bba7552056bb4a9695fcde14ef5

        

      2. Verify the installation

    # Configure kubectl for the root user
    mkdir -p /root/.kube
    cp /etc/kubernetes/admin.conf /root/.kube/config
    
    # List pods across all namespaces and check their status
    # (coredns pods stay Pending until the network plugin below is deployed)
    kubectl get pods --all-namespaces
    
    # Check the health of the cluster components
    kubectl get cs 

    Deploy the calico network

      1. Pull the official calico images

    docker pull calico/node:v3.1.4
    docker pull calico/cni:v3.1.4
    docker pull calico/typha:v3.1.4

      2. Tag the three calico images with their quay.io names

    docker tag calico/node:v3.1.4 quay.io/calico/node:v3.1.4
    docker tag calico/cni:v3.1.4 quay.io/calico/cni:v3.1.4
    docker tag calico/typha:v3.1.4 quay.io/calico/typha:v3.1.4

      3. Remove the original tags

    docker rmi calico/node:v3.1.4
    docker rmi calico/cni:v3.1.4
    docker rmi calico/typha:v3.1.4

      4. Download and customize the calico manifests

    curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml -O
    
    kubectl apply -f rbac-kdd.yaml
    
    curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.7/calico.yaml -O

    # In the ConfigMap, change typha_service_name from "none" to "calico-typha"
    sed -i 's/typha_service_name: "none"/typha_service_name: "calico-typha"/g' calico.yaml

    # In the Deployment spec, set replicas to 1
    sed -i 's/replicas: 0/replicas: 1/g' calico.yaml

    # Change CALICO_IPV4POOL_CIDR to the pod CIDR defined earlier (172.22.0.0/16 here)
    sed -i 's/192.168.0.0/172.22.0.0/g' calico.yaml

    # Set CALICO_NETWORKING_BACKEND to "bird" (the BGP network backend)
    sed -i '/name: CALICO_NETWORKING_BACKEND/{n;s/value: "none"/value: "bird"/;}' calico.yaml

      5. Apply calico.yaml

    kubectl apply -f calico.yaml
    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    kubectl apply -f kube-flannel.yml
    kubectl get pods --all-namespaces

    Deploy the node(s)

      1. Pull and retag the required images

      

    docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0
    docker pull registry.aliyuncs.com/google_containers/pause:3.1
    docker pull calico/node:v3.1.4
    docker pull calico/cni:v3.1.4
    docker pull calico/typha:v3.1.4
    docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0
    docker tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
    docker tag calico/node:v3.1.4 quay.io/calico/node:v3.1.4
    docker tag calico/cni:v3.1.4 quay.io/calico/cni:v3.1.4
    docker tag calico/typha:v3.1.4 quay.io/calico/typha:v3.1.4
    docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0
    docker rmi registry.aliyuncs.com/google_containers/pause:3.1
    docker rmi calico/node:v3.1.4
    docker rmi calico/cni:v3.1.4
    docker rmi calico/typha:v3.1.4

      2. Join the node to the cluster

    kubeadm join 192.168.1.11:6443 --token iazwtj.v3ajyq9kyqftg3et --discovery-token-ca-cert-hash sha256:27aaefd2afc4e75fd34c31365abd3a7357bb4bba7552056bb4a9695fcde14ef5
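
    If the token from kubeadm init has expired (the default TTL is 24 hours), a fresh join command can be generated on the master:

    kubeadm token create --print-join-command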

      3. Check from the master

    kubectl get nodes
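
    A newly joined node reports NotReady until its calico pods are running; it can be watched until it flips to Ready:

    kubectl get nodes -w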

    Deploy the dashboard

       1. Generate a private key and certificate signing request

    mkdir -p /etc/kubernetes/certs
    cd /etc/kubernetes/certs
    openssl genrsa -des3 -passout pass:x -out dashboard.pass.key 2048
    openssl rsa -passin pass:x -in dashboard.pass.key -out dashboard.key
    # Remove the passphrase-protected key generated above
    rm -f dashboard.pass.key
    
    openssl req -new -key dashboard.key -out dashboard.csr
    
    # Generate a self-signed SSL certificate (valid for 365 days)
    openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
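
    Note: the openssl req step above prompts interactively for the certificate subject fields. A non-interactive variant, with an assumed subject CN (adjust as needed):

    openssl req -new -key dashboard.key -out dashboard.csr -subj "/CN=kubernetes-dashboard"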

      

      2. Create the secret

    kubectl create secret generic kubernetes-dashboard-certs --from-file=/etc/kubernetes/certs -n kube-system
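
    To confirm the secret exists before deploying the dashboard:

    kubectl get secret kubernetes-dashboard-certs -n kube-system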

      

      3. Pull and retag the dashboard image (on all nodes)

    docker pull registry.cn-hangzhou.aliyuncs.com/kubernete/kubernetes-dashboard-amd64:v1.10.0
    
    docker tag registry.cn-hangzhou.aliyuncs.com/kubernete/kubernetes-dashboard-amd64:v1.10.0 k8s.gcr.io/kubernetes-dashboard:v1.10.0
    
    docker rmi registry.cn-hangzhou.aliyuncs.com/kubernete/kubernetes-dashboard-amd64:v1.10.0

      

      4. Create the kubernetes-dashboard.yaml deployment manifest below (on the master)

    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kube-system
    ---
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: kubernetes-dashboard-minimal
      namespace: kube-system
    rules:
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["create"]
    - apiGroups: [""]
      resources: ["configmaps"]
      verbs: ["create"]
    - apiGroups: [""]
      resources: ["secrets"]
      resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
      verbs: ["get", "update", "delete"]
    - apiGroups: [""]
      resources: ["configmaps"]
      resourceNames: ["kubernetes-dashboard-settings"]
      verbs: ["get", "update"]
    - apiGroups: [""]
      resources: ["services"]
      resourceNames: ["heapster"]
      verbs: ["proxy"]
    - apiGroups: [""]
      resources: ["services/proxy"]
      resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
      verbs: ["get"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: kubernetes-dashboard-minimal
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: kubernetes-dashboard-minimal
    subjects:
    - kind: ServiceAccount
      name: kubernetes-dashboard
      namespace: kube-system
    ---
    kind: Deployment
    apiVersion: apps/v1beta2
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kube-system
    spec:
      replicas: 1
      revisionHistoryLimit: 10
      selector:
        matchLabels:
          k8s-app: kubernetes-dashboard
      template:
        metadata:
          labels:
            k8s-app: kubernetes-dashboard
        spec:
          containers:
          - name: kubernetes-dashboard
            image: k8s.gcr.io/kubernetes-dashboard:v1.10.0
            ports:
            - containerPort: 8443
              protocol: TCP
            args:
              - --auto-generate-certificates
            volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            - mountPath: /tmp
              name: tmp-volume
            livenessProbe:
              httpGet:
                scheme: HTTPS
                path: /
                port: 8443
              initialDelaySeconds: 30
              timeoutSeconds: 30
          volumes:
          - name: kubernetes-dashboard-certs
            secret:
              secretName: kubernetes-dashboard-certs
          - name: tmp-volume
            emptyDir: {}
          serviceAccountName: kubernetes-dashboard
          tolerations:
          - key: node-role.kubernetes.io/master
            effect: NoSchedule
    ---
    kind: Service
    apiVersion: v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kube-system
    spec:
      ports:
        - port: 443
          targetPort: 8443
          nodePort: 30005
      type: NodePort
      selector:
        k8s-app: kubernetes-dashboard

      5. Create the dashboard pod

    kubectl create -f kubernetes-dashboard.yaml

      6. Check the dashboard's running status (it should then be reachable at https://<node-ip>:30005)

    kubectl get deployment kubernetes-dashboard -n kube-system
    kubectl --namespace kube-system get pods -o wide
    kubectl get services kubernetes-dashboard -n kube-system
    netstat -ntlp|grep 30005

      

      7. Fix dashboard access permissions

      Save the manifest below as kube-dashboard-access.yaml, then apply it:

    kubectl create -f kube-dashboard-access.yaml

    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: kubernetes-dashboard-minimal
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: cluster-admin
    subjects:
    - kind: ServiceAccount
      name: kubernetes-dashboard
      namespace: kube-system
    
    ---
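
    To log in to the dashboard, a bearer token for the kubernetes-dashboard service account can be extracted; the grep pattern below assumes the default token-secret naming for service accounts:

    kubectl -n kube-system describe secret \
        $(kubectl -n kube-system get secret | grep kubernetes-dashboard-token | awk '{print $1}')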

      
