The k8s cluster setup follows another guide whose link I have since lost. A full HA deployment needs 5 machines, 3 masters and 2 workers, since at least 3 masters are required for real high availability (etcd quorum); the walkthrough below makes do with 2 masters and 2 workers behind a keepalived VIP.
Pipeline: Jenkins builds the code >> the build artifact is baked into a Docker image >> the registry's old image is kept as a backup >> the new image is pushed to the registry >> k8s pulls the image from the registry >> the yaml files are applied to create/update the service (the Jenkins script and the yaml files are at the bottom of this article).
# disable firewalld
systemctl disable firewalld
systemctl stop firewalld
setenforce 0 # disable SELinux for the current boot
# to disable SELinux permanently, set SELINUX=disabled in /etc/sysconfig/selinux
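The permanent change is a one-liner; a sketch (on CentOS 7, /etc/sysconfig/selinux is a symlink to /etc/selinux/config):
# set SELINUX=disabled so the setting survives reboots
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux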
hostnamectl set-hostname master1 # set the hostname; use a different one on each server
# configure the hosts file (same on every machine)
cat /etc/hosts
192.168.51.213 master1
192.168.51.214 master2
192.168.51.211 node1
192.168.51.212 node2
# set up passwordless ssh from master1 to the other machines (only master1 needs this step)
ssh-keygen -t rsa
ssh-copy-id master2
ssh-copy-id node1
ssh-copy-id node2
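A quick loop to verify the key-based login works before moving on (a sketch using the hostnames from /etc/hosts above):
# each command should print the remote hostname without prompting for a password
for h in master2 node1 node2; do ssh $h hostname; done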
# kernel parameters: enable forwarding, make bridged traffic traverse iptables, allow binding to the VIP (ip_nonlocal_bind), and discourage swapping
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
# disable swap (kubelet will not start with swap enabled)
swapoff -a
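swapoff -a only lasts until the next reboot; to keep swap off permanently, comment out the swap line in /etc/fstab, for example:
# comment out any swap entry so it stays disabled across reboots
sed -ri 's/.*swap.*/#&/' /etc/fstab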
# sync the clock against Aliyun's NTP server
ntpdate ntp1.aliyun.com
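ntpdate is a one-shot sync; a cron entry keeps the clocks from drifting apart again (a sketch, assuming crond is running and root's crontab lives in the standard CentOS location):
# re-sync against the Aliyun NTP server every hour
echo '0 * * * * /usr/sbin/ntpdate ntp1.aliyun.com' >> /var/spool/cron/root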
# load the ipvs kernel modules that kube-proxy uses for forwarding
# (on kernels 4.19 and newer, nf_conntrack_ipv4 has been renamed to nf_conntrack)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# switch the yum repos to the Aliyun mirrors
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
wget http://mirrors.aliyun.com/repo/Centos-7.repo -O /etc/yum.repos.d/CentOS-Base.repo
wget http://mirrors.aliyun.com/repo/epel-7.repo -O /etc/yum.repos.d/epel.repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
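After swapping the repos it is worth rebuilding the yum cache so the new mirrors are actually used:
yum clean all && yum makecache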
# install and configure keepalived and haproxy (only the 2 masters need this)
yum install -y keepalived haproxy
# in keepalived.conf, priority is 100 on master1 and 90 on master2; everything else is identical on both
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        liumingtao@gxcards.com
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    lvs_sync_daemon_interface ens192
    virtual_router_id 88
    advert_int 1
    priority 100
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.51.210/24
    }
}
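Once keepalived is running (started below), you can confirm which master currently holds the VIP; the address should appear on ens192 on exactly one of the two:
# the VIP is only present on the active master
ip addr show ens192 | grep 192.168.51.210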
# edit the haproxy config; it is identical on both masters (binding to the VIP works on the standby too because net.ipv4.ip_nonlocal_bind=1 was set above)
cat /etc/haproxy/haproxy.cfg
global
    chroot /var/lib/haproxy
    daemon
    group haproxy
    user haproxy
    log 127.0.0.1:514 local0 warning
    pidfile /var/lib/haproxy.pid
    maxconn 20000
    spread-checks 3
    nbproc 8
defaults
    log global
    mode tcp
    retries 3
    option redispatch
listen https-apiserver
    bind 192.168.51.210:8443
    mode tcp
    balance roundrobin
    timeout server 15s
    timeout connect 15s
    server apiserver01 192.168.51.213:6443 check port 6443 inter 5000 fall 5
    server apiserver02 192.168.51.214:6443 check port 6443 inter 5000 fall 5
# enable and start keepalived and haproxy
systemctl enable keepalived && systemctl start keepalived
systemctl enable haproxy && systemctl start haproxy
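On the master currently holding the VIP, haproxy should now be listening on the frontend port; a quick check:
# expect a LISTEN entry for 192.168.51.210:8443 owned by haproxy
ss -tlnp | grep 8443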
# install the k8s components (all 4 machines need this); the config below pins kubernetesVersion to v1.14.0, so pin the package versions to match if the repo has moved on (e.g. yum install kubelet-1.14.0)
yum install -y kubelet kubeadm kubectl ipvsadm ipset docker-ce
# start docker
systemctl enable docker && systemctl start docker
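The private registry used later (192.168.51.211:5000) is plain HTTP, so docker will refuse to push or pull from it unless it is whitelisted; a minimal daemon.json sketch, applied on every machine that talks to the registry:
cat > /etc/docker/daemon.json <<EOF
{
  "insecure-registries": ["192.168.51.211:5000"]
}
EOF
systemctl restart docker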
# enable kubelet at boot
systemctl enable kubelet
# dump the cluster's default config to a file and edit it (only master1 needs this)
kubeadm config print init-defaults > kubeadm-init.yaml
cat kubeadm-init.yaml
# Changes made: advertiseAddress is master1's IP; controlPlaneEndpoint is 192.168.51.210:8443 (the VIP);
# imageRepository is registry.cn-hangzhou.aliyuncs.com/google_containers so images are pulled from Aliyun;
# podSubnet is set to 10.244.0.0/16, which flannel expects (flannel will not come up if this is missing or wrong);
# the last 3 lines are appended to switch kube-proxy to ipvs mode.
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.51.213
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.51.210:8443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
# pull the images the cluster needs (run on master1)
kubeadm config images pull --config kubeadm-init.yaml
# initialize the cluster
kubeadm init --config kubeadm-init.yaml
# set up the kubectl admin config (run on master1)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# run this script to copy master1's certificates over to master2
USER=root
CONTROL_PLANE_IPS="master2"
for host in ${CONTROL_PLANE_IPS}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
# after kubeadm init finishes on master1, it prints two join commands similar to these
kubeadm join 192.168.51.210:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:c8195afaff02deb8f263d59e81a6abf0efaf52d799df07e555d221e9b0feb301 --experimental-control-plane --ignore-preflight-errors=all # run this one on master2; it takes a while, be patient
# once the join finishes on master2, set up the kubectl admin config there too
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubeadm join 192.168.51.210:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:c8195afaff02deb8f263d59e81a6abf0efaf52d799df07e555d221e9b0feb301 # run this one on both worker nodes; it completes quickly
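The token above expires after the 24h ttl set in kubeadm-init.yaml; if a node needs to join later, generate a fresh join command on master1:
# prints a complete "kubeadm join ..." line with a new token
kubeadm token create --print-join-command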
# install the network plugin (flannel)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml # run on master1
# check that every node has joined the cluster
kubectl get nodes
# check that the k8s system components came up (every STATUS should be Running)
kubectl get pod -n kube-system
# check that the cluster network is healthy; the ipvs table should look like this
ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.96.0.1:443 rr
-> 192.168.51.213:6443 Masq 1 0 0
-> 192.168.51.214:6443 Masq 1 1 0
TCP 10.96.0.10:53 rr
-> 10.244.1.2:53 Masq 1 0 0
-> 10.244.1.3:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 10.244.1.2:9153 Masq 1 0 0
-> 10.244.1.3:9153 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 10.244.1.2:53 Masq 1 0 0
-> 10.244.1.3:53 Masq 1 0 0
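A last sanity check is to resolve a service name through the cluster DNS shown above (a sketch; dig comes from the bind-utils package):
# should return the kubernetes service ClusterIP, 10.96.0.1
dig +short kubernetes.default.svc.cluster.local @10.96.0.10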
# --- end of the cluster setup ---
The script Jenkins runs after pulling and building the code:
#!/bin/bash
## write the Dockerfile that bakes the jar into the image
## (> rather than >> so repeated builds do not append duplicate content; each JVM flag in
## ENTRYPOINT is its own array element, since exec form passes each quoted string as one argument)
cat <<EOF > /var/lib/jenkins/workspace/app-web-user/Dockerfile
FROM centos7-java-webuser
RUN mkdir -p /app/web-user/
COPY default/web-user/target/app-web-user.jar /app/web-user/
EXPOSE 12220
EXPOSE 13220
WORKDIR /app/web-user/
ENTRYPOINT ["/usr/local/java/jdk1.8.0_101/bin/java","-Dsun.misc.URLClassPath.disableJarChecking=true","-Xmx128M","-Xms128M","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=12220","-jar","app-web-user.jar"]
EOF
## timestamp suffix used to version the image
DATE=`date +%y%m%d%H%M`
cd /var/lib/jenkins/workspace/app-web-user
## build the docker image from the Dockerfile (the date is baked into the image name rather than used as a tag, so every build is a distinct repository)
docker build -t 192.168.51.211:5000/webuser_$DATE .
## push the image to the local private registry (earlier date-stamped images stay in the registry, which covers the backup step from the flow above)
docker push 192.168.51.211:5000/webuser_$DATE
### point line 14 of the Deployment yaml at the new image
### (| as the sed delimiter because the replacement contains /; the 8 leading spaces match the yaml indentation)
sed -i "14s|.*|        image: 192.168.51.211:5000/webuser_$DATE|" /k8s/web-user/web-user-pod.yaml
### apply the yaml so k8s rolls the Deployment onto the new image
/usr/bin/kubectl apply -f /k8s/web-user/
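If the new image turns out to be bad, the Deployment can be rolled back to its previous revision; a minimal sketch:
### roll web-user back to the previous image and wait for the rollout to settle
/usr/bin/kubectl rollout undo deployment/web-user
/usr/bin/kubectl rollout status deployment/web-user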
The yaml files for the k8s application:
cd /k8s/web-user/
cat web-user-pod.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: web-user
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: web-user
    spec:
      containers:
      - name: web-user
        image: 192.168.51.211:5000/webuser_1908211449
        ports:
        - containerPort: 13220
cat web-user-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: web-user
spec:
  ports:
  - name: web-user-svc
    port: 13220
    targetPort: 13220
    nodePort: 32221
  selector:
    app: web-user
  type: NodePort
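With the Service in place, the app is reachable on any node's IP at the NodePort; a quick test (the URL path is hypothetical, use whatever the app actually serves):
# hit the NodePort through one of the workers
curl http://192.168.51.211:32221/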