Node planning
192.168.188.133  CentOS 7  2 vCPU / 2 GB RAM  50 GB disk  k8s-nfs
Set up the NFS service
# Install NFS on the NFS server node
yum -y install nfs-utils
# Create the NFS directory
mkdir -p /nfs/data/
# Change permissions
chmod -R 777 /nfs/data
# Edit the exports file
vim /etc/exports
/nfs/data *(rw,no_root_squash,sync)
# Reload the export configuration
exportfs -r
# Check the active exports
exportfs
# Start the rpcbind and nfs services
systemctl restart rpcbind && systemctl enable rpcbind
systemctl restart nfs && systemctl enable nfs
# Check the RPC service registration
rpcinfo -p localhost
# Test with showmount
showmount -e 192.168.188.133
# Install the NFS client on all Kubernetes nodes
yum -y install nfs-utils
systemctl start nfs && systemctl enable nfs
At this point the NFS server is set up; the shared directory is /nfs/data.
Test the NFS share
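A quick way to verify the export is to mount it from any client node, write a file, and unmount again (a minimal sketch; /mnt/nfs-test is just a temporary mount point used for this check):
# mount the export and confirm it is writable
mkdir -p /mnt/nfs-test
mount -t nfs 192.168.188.133:/nfs/data /mnt/nfs-test
touch /mnt/nfs-test/test-file && ls -l /mnt/nfs-test
# clean up
umount /mnt/nfs-test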
Using the NFS client provisioner to dynamically provision Kubernetes backend storage
Use the NFS server as the backing store for Kubernetes persistent storage and have it dynamically provision PVs. Prerequisites: an NFS server is already installed, and the NFS server is network-reachable from the Kubernetes worker (slave) nodes.
All files used below come from the nfs-client directory of git clone https://github.com/kubernetes-incubator/external-storage.git
nfs-client-provisioner: a simple external NFS provisioner for Kubernetes. It does not provide NFS itself; it requires an existing NFS server to supply the storage.
PVs are provisioned on the NFS server as directories named ${namespace}-${pvcName}-${pvName}.
When a PV is reclaimed, its directory on the NFS server is renamed to archived-${namespace}-${pvcName}-${pvName}.
Installation and deployment
Modify the deployment file and deploy it: deploy/deployment.yaml
The only changes needed are the NFS server's IP address and the NFS shared path (/ifs/kubernetes in the upstream example); both must be replaced with your actual NFS server and shared directory. The manifest below uses /home/nfs/data; if you followed the NFS setup earlier in this document, the exported directory is /nfs/data, so use that instead.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.188.133
            - name: NFS_PATH
              value: /home/nfs/data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.188.133
            path: /home/nfs/data
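Apply the deployment and confirm the provisioner pod comes up (assuming you are in the nfs-client directory of the cloned repository):
kubectl apply -f deploy/deployment.yaml
# the label below matches the app: nfs-client-provisioner label in the manifest above
kubectl get pods -l app=nfs-client-provisioner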
Modify the StorageClass file and deploy it: deploy/class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: default
provisioner: fuseim.pri/ifs
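Assuming the manifest above is saved as deploy/class.yaml, apply it with:
kubectl apply -f deploy/class.yaml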
Set this StorageClass named default as the Kubernetes default storage backend:
kubectl patch storageclass default -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
# kubectl get sc
NAME                PROVISIONER      AGE
default (default)   fuseim.pri/ifs   44m
Authorization
If RBAC is enabled in your cluster, or you are running OpenShift, you must authorize the provisioner.
kubectl apply -f rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
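After kubectl apply -f rbac.yaml, the objects can be verified by the names defined in the manifest above:
kubectl get serviceaccount nfs-client-provisioner
kubectl get clusterrole nfs-client-provisioner-runner
kubectl get clusterrolebinding run-nfs-client-provisioner
kubectl get role leader-locking-nfs-client-provisioner
kubectl get rolebinding leader-locking-nfs-client-provisioner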
Test: create a PVC. Note that the volume.beta.kubernetes.io/storage-class annotation below names a StorageClass called es-data-db from the author's environment; it must reference an existing StorageClass backed by the fuseim.pri/ifs provisioner (for example the default StorageClass created above).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "es-data-db"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
kubectl create -f deploy/test-claim.yaml
# kubectl get pvc
NAME         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-claim   Bound    pvc-c840caaa-2977-11ea-9ae2-000c29ff9863   1Mi        RWX            es-data-db     14m
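The provisioner also creates a matching PV whose name is the pvc-... value shown in the VOLUME column. To confirm it, or to troubleshoot a claim stuck in Pending, the following checks are usually enough:
# list dynamically provisioned PVs
kubectl get pv
# inspect the provisioner logs if the claim stays Pending
kubectl logs deployment/nfs-client-provisioner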
Test: create a Pod
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: busybox:1.24
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
kubectl create -f deploy/test-pod.yaml
# kubectl get pods | grep test
test-pod   0/1   Completed   0   11m
On the NFS server, check whether a SUCCESS file was created in the PV subdirectory under the shared directory.
# cd /home/nfs/data/
[root@efk-mysql data]# ll
total 0
drwxrwxrwx 2 root root 21 Dec 28 21:44 default-test-claim-pvc-c840caaa-2977-11ea-9ae2-000c29ff9863
drwxrwxrwx 2 root root 6 Dec 28 21:39 efk-data-es-cluster-0-pvc-0d4dc2a7-2957-11ea-9ae2-000c29ff9863
drwxrwxrwx 2 root root 6 Dec 28 21:39 efk-elasticsearch-logging-elasticsearch-logging-0-pvc-17148f41-295c-11ea-9ae2-000c29ff9863
[root@efk-mysql data]# ll default-test-claim-pvc-c840caaa-2977-11ea-9ae2-000c29ff9863
total 0
-rw-r--r-- 1 root root 0 Dec 28 21:44 SUCCESS
Delete the test Pod
kubectl delete -f deploy/test-pod.yaml
Delete the test PVC
kubectl delete -f deploy/test-claim.yaml
In the shared directory on the NFS server, check that after the PV is reclaimed its directory name starts with archived-.
# ll
total 0
drwxrwxrwx 2 root root 21 Dec 28 21:44 archived-default-test-claim-pvc-c840caaa-2977-11ea-9ae2-000c29ff9863
drwxrwxrwx 2 root root 6 Dec 28 21:39 efk-data-es-cluster-0-pvc-0d4dc2a7-2957-11ea-9ae2-000c29ff9863
drwxrwxrwx 2 root root 6 Dec 28 21:39 efk-elasticsearch-logging-elasticsearch-logging-0-pvc-17148f41-295c-11ea-9ae2-000c29ff9863
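Whether reclaimed volumes are archived this way is controlled by the provisioner. In the upstream nfs-client example, deploy/class.yaml can carry an archiveOnDelete parameter; a variant that discards the data on delete would look roughly like the sketch below (based on the upstream manifest; confirm that the provisioner image you run supports this parameter):
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: default
provisioner: fuseim.pri/ifs
parameters:
  # "false" removes the backing directory on delete instead of renaming it to archived-...
  archiveOnDelete: "false"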