一、配置 NFS 服务器
# Prepare the directory that will back dynamically provisioned volumes.
# -p makes the command idempotent (safe to re-run).
mkdir -p /data/sc
# World-writable so pods running as any UID can write.
# NOTE(review): 777 is convenient for a lab; tighten for production.
chmod -R 777 /data/sc
apt-get -y install nfs-common nfs-kernel-server
# Export exactly the directory the provisioner mounts (/data/sc).
# The original exported the parent /data, which is broader than needed
# and inconsistent with the NFS_PATH used by the provisioner below.
echo "/data/sc *(rw,no_root_squash,insecure,sync)" > /etc/exports
# Enable on boot and (re)start so the new export takes effect.
systemctl enable nfs-server
systemctl restart nfs-server
# Verify the export is visible.
showmount -e localhost
二、创建 ServiceAccount 及 RBAC 授权
vi rbac.yaml
kubectl apply -f rbac.yaml
---
# ServiceAccount the provisioner pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # Replace with the namespace where the provisioner is deployed
  # (same value must be used in all manifests below).
  namespace: default
---
# Cluster-wide permissions: the provisioner creates/deletes PVs,
# watches PVCs and StorageClasses, and records Events.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # Replace with the namespace where the provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced permissions used for leader election (endpoints lock).
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: leader-locking-nfs-client-provisioner
  # Replace with the namespace where the provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # Replace with the namespace where the provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
三、创建 StorageClass
vi nfs-StorageClass.yaml
kubectl apply -f nfs-StorageClass.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
# Must match the PROVISIONER_NAME environment variable set in the
# provisioner Deployment (nfs-provisioner.yaml).
provisioner: default-nfs-storage
parameters:
  # "false": data of a deleted PVC is removed rather than archived
  # into an "archived-*" directory on the NFS share.
  archiveOnDelete: "false"
创建NFS provisioner ( 官方github地址https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner)
vi nfs-provisioner.yaml
kubectl apply -f nfs-provisioner.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # Must match the namespace used in the RBAC manifests.
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  # NOTE(review): the original file declared this `selector` block twice;
  # duplicate mapping keys are invalid YAML (most parsers silently keep
  # the last one). Only one copy is kept here.
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must match the `provisioner` field of the StorageClass
            # (nfs-StorageClass.yaml).
            - name: PROVISIONER_NAME
              value: "default-nfs-storage"
            # NFS server IP address.
            - name: NFS_SERVER
              value: "10.10.110.212"
            # Exported directory on the NFS server.
            - name: NFS_PATH
              value: "/data/sc"
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.110.212  # NFS server IP address
            path: /data/sc         # exported directory
四、动态创建 PVC
vi pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: common
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  # References the StorageClass created above; the NFS provisioner
  # will create a matching PV automatically.
  storageClassName: managed-nfs-storage
deployment使用动态创建的pvc
vi deploy_svc.yaml
kubectl apply -f deploy_svc.yaml
---
# NodePort Service exposing the test nginx Deployment.
apiVersion: v1
kind: Service
metadata:
  name: nginx-test
spec:
  type: NodePort
  selector:
    app: nginx-test
  ports:
    - name: nginx-test
      port: 80
      targetPort: 80
      protocol: TCP
---
# Test Deployment consuming the dynamically provisioned PVC "common".
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-test
  template:
    metadata:
      labels:
        app: nginx-test
    spec:
      containers:
        - name: nginx-test
          image: nginx
          imagePullPolicy: Always
          ports:
            - containerPort: 80
          volumeMounts:
            - mountPath: /data/
              name: commonmount
            - mountPath: /logs
              name: logs-pilot
      volumes:
        - name: commonmount
          persistentVolumeClaim:
            claimName: common
        - name: logs-pilot
          emptyDir: {}
五、StatefulSet 通过 volumeClaimTemplates 动态申请 PVC
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: my-nginx-demo
  labels:
    app.kubernetes.io/name: my-nginx-demo
    app.kubernetes.io/version: "1.0"
spec:
  replicas: 1
  serviceName: my-nginx-demo-svc
  selector:
    matchLabels:
      app.kubernetes.io/name: my-nginx-demo
  template:
    metadata:
      labels:
        app.kubernetes.io/name: my-nginx-demo
    spec:
      restartPolicy: Always
      containers:
        - name: my-nginx-demo
          image: "nginx:1.17.9"
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          volumeMounts:
            - mountPath: "/usr/share/nginx/html"
              name: html
  # One PVC per replica is created automatically from this template,
  # provisioned by the managed-nfs-storage StorageClass.
  volumeClaimTemplates:
    - metadata:
        name: html
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: managed-nfs-storage
---
# Headless-style companion Service referenced by serviceName above.
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-demo-svc
  labels:
    app.kubernetes.io/name: my-nginx-demo
    app.kubernetes.io/version: "1.0"
spec:
  # type: ClusterIP
  # type: LoadBalancer
  # type: NodePort
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: my-nginx-demo