Deploying a ZooKeeper Cluster with a StatefulSet
This follows the official Kubernetes ZooKeeper tutorial, with the data mounts switched to statically created PVs: https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/
NFS is used as the shared storage here.
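Before creating the PVs, the export directories must already exist on the NFS server. A minimal sketch of the server-side preparation, assuming root access on 192.168.1.91; the hostname in the prompt and the /etc/exports client range and options are illustrative, so adjust them to your network:
[root@nfs-server ~]# mkdir -p /data/zk-cluster/zk-0{1,2,3}
[root@nfs-server ~]# echo "/data/zk-cluster 192.168.1.0/24(rw,sync,no_root_squash)" >> /etc/exports
[root@nfs-server ~]# exportfs -r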
Create the PVs
[root@node01 zk-cluster]# cat zk-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk-01
  labels:
    name: zk-cluster
    type: nfs
spec:
  nfs:
    path: /data/zk-cluster/zk-01
    server: 192.168.1.91
  accessModes:
  - "ReadWriteOnce"
  capacity:
    storage: 5Gi
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk-02
  labels:
    name: zk-cluster
    type: nfs
spec:
  nfs:
    path: /data/zk-cluster/zk-02
    server: 192.168.1.91
  accessModes:
  - "ReadWriteOnce"
  capacity:
    storage: 5Gi
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zk-03
  labels:
    name: zk-cluster
    type: nfs
spec:
  nfs:
    path: /data/zk-cluster/zk-03
    server: 192.168.1.91
  accessModes:
  - "ReadWriteOnce"
  capacity:
    storage: 5Gi
  persistentVolumeReclaimPolicy: Recycle
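Apply the manifest and confirm that all three PVs show as Available before moving on (output omitted here, as the exact columns vary by cluster version):
[root@node01 zk-cluster]# kubectl apply -f zk-pv.yaml
[root@node01 zk-cluster]# kubectl get pv -l name=zk-cluster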
StatefulSet
[root@node01 zk-cluster]# cat statefulset-zk.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        #imagePullPolicy: Always
        imagePullPolicy: IfNotPresent
        image: "registry.k8s.com/test/zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        # runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi
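The volumeClaimTemplates request ReadWriteOnce and 5Gi with no storageClassName, so (assuming no default StorageClass intercepts the claims) each pod's datadir PVC binds to one of the zk-01/zk-02/zk-03 PVs created above. With podManagementPolicy: OrderedReady the pods start strictly one after another; apply the manifest and watch them come up:
[root@node01 zk-cluster]# kubectl apply -f statefulset-zk.yaml
[root@node01 zk-cluster]# kubectl get pods -w -l app=zk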
Testing
Check the myid of each server
[root@node01 zk-cluster]# for i in 0 1 2; do echo "myid zk-$i";kubectl exec zk-$i -- cat /var/lib/zookeeper/data/myid; done
myid zk-0
1
myid zk-1
2
myid zk-2
3
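Each server's myid is its pod ordinal plus one. The start-zookeeper script writes it alongside the ensemble definition into zoo.cfg; to inspect the generated configuration (the path matches the --conf_dir flag in the manifest), run:
[root@node01 zk-cluster]# kubectl exec zk-0 -- cat /opt/zookeeper/conf/zoo.cfg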
To get the fully qualified domain name (FQDN) of each Pod in the zk StatefulSet, use the following command:
[root@node01 zk-cluster]# for i in 0 1 2; do kubectl exec zk-$i -- hostname -f; done
zk-0.zk-hs.default.svc.cluster.local
zk-1.zk-hs.default.svc.cluster.local
zk-2.zk-hs.default.svc.cluster.local
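As a final sanity check, write a znode through one member and read it back from another; if the ensemble has quorum, the value replicates across servers (commands follow the official tutorial, output omitted):
[root@node01 zk-cluster]# kubectl exec zk-0 -- zkCli.sh create /hello world
[root@node01 zk-cluster]# kubectl exec zk-1 -- zkCli.sh get /hello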