• k8s 1.15.1 cluster installation


    k8s cluster installation

    1) Initialization:

    The following must be run on every machine.

    yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
    
    # Stop the firewall, install iptables-services, and flush iptables
    systemctl stop firewalld && systemctl disable firewalld
    yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
    
    # Disable swap
    swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
    
    # Disable SELinux
    setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
    
    
    # Tune kernel parameters for Kubernetes
    cat > k8s.conf << EOF
    net.bridge.bridge-nf-call-iptables=1
    net.bridge.bridge-nf-call-ip6tables=1
    net.ipv4.ip_forward=1
    net.ipv4.tcp_tw_recycle=0
    vm.swappiness=0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    fs.inotify.max_user_instances=8192
    fs.inotify.max_user_watches=1048576
    fs.file-max=52706963
    fs.nr_open=52706963
    net.ipv6.conf.all.disable_ipv6=1
    EOF
    cp k8s.conf /etc/sysctl.d/k8s.conf
    sysctl -p /etc/sysctl.d/k8s.conf
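    
    # If sysctl reports the net.bridge.* keys as unknown, the br_netfilter module is not
    # loaded yet; a quick fix is to load it first (the IPVS step below loads it as well)
    # and re-apply the settings:
    modprobe br_netfilter
    sysctl -p /etc/sysctl.d/k8s.conf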
    
    
    # Configure rsyslogd and systemd journald
    # Directory for persistent log storage
    mkdir /var/log/journal
    mkdir /etc/systemd/journald.conf.d
    cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
    [Journal]
    # Persist logs to disk
    Storage=persistent
    
    # Compress historical logs
    Compress=yes
    
    SyncIntervalSec=5m
    RateLimitInterval=30s
    RateLimitBurst=1000
    
    # Cap total journal disk usage at 10G
    SystemMaxUse=10G
    
    # Cap each log file at 200M
    SystemMaxFileSize=200m
    
    # Keep logs for two weeks
    MaxRetentionSec=2week
    
    # Do not forward logs to syslog
    ForwardToSyslog=no
    EOF
    
    systemctl restart systemd-journald
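    
    # Optional sanity check: confirm journald is running with the new settings and see
    # how much disk space the journal currently uses
    systemctl status systemd-journald --no-pager
    journalctl --disk-usage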
    

    Upgrade the kernel to 4.4 (kernel-lt)

    rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
    # After installation, check that the kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, install again!
    yum --enablerepo=elrepo-kernel install -y kernel-lt
    # Boot from the new kernel by default
    grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
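    
    # Sanity check: grub2-editenv list shows the saved default entry; after a reboot,
    # uname -r should report the 4.4.x kernel
    grub2-editenv list
    uname -r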
    

    Prerequisites for enabling IPVS in kube-proxy

    modprobe br_netfilter
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
    

    Install Docker

    
    yum install -y yum-utils device-mapper-persistent-data lvm2
    
    yum-config-manager \
        --add-repo \
        http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
     
    yum update -y && yum install -y docker-ce
    
    # Alternatively, install a specific Docker version with:
    export VERSION=18.09 && curl -sSL get.docker.com | sh
    
    systemctl start docker
    
    cat > /etc/docker/daemon.json <<EOF
    {
    	"exec-opts": ["native.cgroupdriver=systemd"],
    	"log-driver": "json-file",
    	"log-opts": {
    		"max-size": "100m"
    	}
    }
    EOF
    
    mkdir -p /etc/systemd/system/docker.service.d
    systemctl daemon-reload && systemctl restart docker && systemctl enable docker
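    
    # Sanity check: the Docker cgroup driver should now report systemd, as set in daemon.json
    docker info | grep -i "cgroup driver"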
    

    Install kubeadm (master/worker setup)

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
    systemctl enable kubelet.service
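    
    # Sanity check: confirm the pinned 1.15.1 versions were installed
    kubeadm version -o short
    kubelet --version
    kubectl version --client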
    

    Initialize the master node

    # The Kubernetes Docker images need to be pulled
    # Generate the kubeadm config file, then edit the image download source, whether the master may run workload Pods, and the network settings
    kubeadm config print init-defaults > kubeadm.conf
    
    vi kubeadm.conf
    # Change the default kubeadm init parameters as follows
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kubernetesVersion: v1.15.1
    nodeRegistration:
      taints:
      - effect: PreferNoSchedule
        key: node-role.kubernetes.io/master
    localAPIEndpoint:
      advertiseAddress: 10.10.10.10   # fill in the master node IP here
    networking:
      dnsDomain: cluster.local
      podSubnet: 192.168.0.0/16
      serviceSubnet: 172.18.0.0/16
    
    
    # Pull the images using the custom config
    kubeadm config images pull --config kubeadm.conf
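    
    # kubeadm can also list the images resolved from the config, which is a quick way to
    # verify the imageRepository and kubernetesVersion settings:
    kubeadm config images list --config kubeadm.conf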
    
    
    # Re-tag the pulled images
    # Images downloaded from the domestic mirror must be re-tagged to k8s.gcr.io
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.1 k8s.gcr.io/kube-scheduler:v1.15.1
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.1
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.1 k8s.gcr.io/kube-proxy:v1.15.1
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.1
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.1 k8s.gcr.io/kube-controller-manager:v1.15.1
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.1
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.1 k8s.gcr.io/kube-apiserver:v1.15.1
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.1
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
    
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
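    
    # The per-image tag/rmi pairs above can equivalently be written as a loop over the
    # same image list, for example:
    for img in kube-apiserver:v1.15.1 kube-controller-manager:v1.15.1 kube-scheduler:v1.15.1 \
               kube-proxy:v1.15.1 coredns:1.3.1 etcd:3.3.10 pause:3.1; do
        docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
        docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$img
    done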
    
    
    # Confirm the re-tagging succeeded
    docker images
    
    ## Everything above must be run on all nodes
    
    # Deploy the master node
    # Initialize kubeadm
    kubeadm init --config /root/kubeadm.conf
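    
    # On success, kubeadm init prints follow-up steps; to use kubectl as a regular user,
    # copy the admin kubeconfig as suggested in that output (as root you can instead
    # export KUBECONFIG=/etc/kubernetes/admin.conf)
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config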
    
    
    # Deploy the pod network (flannel)
    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    
    # Note: kube-flannel.yml defaults the flannel Network to 10.244.0.0/16; since podSubnet
    # was set to 192.168.0.0/16 above, edit net-conf.json in the manifest to match before applying it
    kubectl create -f kube-flannel.yml
    
    kubectl get pod -n kube-system
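    
    # Worker nodes join the cluster with the kubeadm join command printed at the end of
    # kubeadm init; the token and CA hash below are placeholders, use the values from
    # your own init output
    kubeadm join 10.10.10.10:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
    
    # Back on the master, confirm the nodes register and become Ready
    kubectl get nodes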
    
    
    
    