Kubernetes Storage: NFS


    1. Environment preparation: setting up NFS (for the cluster)

    1.1 Server planning

    Role                    IP address
    master (k8s cluster)    192.168.99.201
    node1  (k8s cluster)    192.168.99.202
    node2  (k8s cluster)    192.168.99.203
    NFS server              192.168.99.204

    1.2 NFS server

    $ yum install -y nfs-utils
    $ systemctl enable nfs-server rpcbind --now
    # Install nfs-utils on the NFS server and enable/start nfs-server and rpcbind

    $ mkdir -p /data/nfs-volume && chmod -R 777 /data/nfs-volume
    # Create the NFS shared directory and grant permissions

    $ cat > /etc/exports << EOF
    /data/nfs-volume 192.168.99.0/24(rw,sync,no_root_squash)
    EOF
    # Write the export entry

    $ systemctl reload nfs-server
    # Reload the NFS server so the new export takes effect

    # Verify the export:
    $ showmount -e 192.168.99.204
    Export list for 192.168.99.204:
    /data/nfs-volume 192.168.99.0/24
    

    1.3 NFS clients

    $ yum install -y nfs-utils
    # Install the NFS client on every k8s node that will use NFS

    $ systemctl enable rpcbind --now
    # Enable and start rpcbind

    # Each client can verify the export as well:
    $ showmount -e 192.168.99.204
    Export list for 192.168.99.204:
    /data/nfs-volume 192.168.99.0/24

    $ mkdir /opt/nfs-volume
    $ mount -t nfs 192.168.99.204:/data/nfs-volume /opt/nfs-volume
    $ df -h | tail -1
    # Test that the share mounts and is usable
    

    1.4 Configure clients to mount automatically at boot

    $ cat >> /etc/fstab << EOF
    192.168.99.204:/data/nfs-volume /opt/nfs-volume nfs defaults,_netdev 0 0
    EOF
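
    To confirm the fstab entry works without rebooting, remount from fstab (a quick check; it assumes the manual mount from 1.3 is still active, so unmount it first):

    $ umount /opt/nfs-volume
    $ mount -a
    $ df -h | tail -1
    # The NFS share should reappear under /opt/nfs-volume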
    

    2. Configuring Kubernetes to use NFS persistent storage

    2.1 Manual (static) provisioning

    $ kubectl create ns nfs-pv-pvc
    $ cat > nfs-pv.yaml << EOF 
    apiVersion: v1            # API version; v1 is the stable version, required
    kind: PersistentVolume    # k8s resource type: PersistentVolume
    metadata:                 # metadata block, global attributes of this resource
      name: nfs-pv001         # custom name nfs-pv001
    spec:                     # spec block
      capacity:               # storage capacity of the PV
        storage: 5Gi          # size of the PV; Gi/Mi are base-1024 units
      accessModes:            # access modes: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
        - ReadWriteMany
      persistentVolumeReclaimPolicy: Retain   # reclaim policy: Retain, Recycle, or Delete
      storageClassName: nfs    # note: must match the storageClassName in the PVC
      nfs:                     # NFS volume source
        path: /data/nfs-volume # shared directory exported by the NFS server
        server: 192.168.99.204 # IP address of the NFS server
    EOF
    
    $ cat > nfs-pvc.yaml << EOF 
    apiVersion: v1                # API version; v1 is the stable version, required
    kind: PersistentVolumeClaim   # k8s resource type
    metadata:                     # metadata block
      name: nfs-pvc001            # custom PVC name
      namespace: nfs-pv-pvc       # target namespace
    spec:                         # spec block
      accessModes:                # access modes
        - ReadWriteMany
      storageClassName: nfs       # note: must match the storageClassName in the PV
      resources:                  # resource requirements
        requests:                 # requests block
          storage: 5Gi            # requested storage size
    EOF
    
    $ cat > nginx-alpine.yaml << EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      namespace: nfs-pv-pvc
      labels:
        app: nginx
    spec:
      replicas: 1    # adjust the replica count as needed
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx:alpine
            imagePullPolicy: IfNotPresent
            ports:
            - containerPort: 80
            volumeMounts:
            - name: nfs-pvc
              mountPath: "/usr/share/nginx/html"
          volumes:
          - name: nfs-pvc
            persistentVolumeClaim:
              claimName: nfs-pvc001   # must match the PVC name
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: my-svc-nginx-alpine
      namespace: nfs-pv-pvc
    spec:
      type: ClusterIP
      selector:
        app: nginx
      ports:
      - protocol: TCP
        port: 80
        targetPort: 80
    EOF
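
    Apply the manifests created above (the PV is cluster-scoped; the PVC, Deployment, and Service live in the nfs-pv-pvc namespace), then confirm the claim is Bound:

    $ kubectl apply -f nfs-pv.yaml
    $ kubectl apply -f nfs-pvc.yaml
    $ kubectl apply -f nginx-alpine.yaml
    $ kubectl get pv,pvc -n nfs-pv-pvc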
    

    Create a file in the shared NFS directory on the NFS server (192.168.99.204)

    $ echo "2021-7-20" > /data/nfs-volume/index.html
    
    $ kubectl get pod -n nfs-pv-pvc -o custom-columns=':metadata.name'
    
    $ kubectl exec -it nginx-deployment-799b74d8dc-7fmnl -n nfs-pv-pvc -- cat  /usr/share/nginx/html/index.html
    
    $ kubectl get pod -n nfs-pv-pvc -owide
    $ kubectl get svc -n nfs-pv-pvc -owide
    

    Access verification

    $ kubectl get po -n nfs-pv-pvc -o custom-columns=':status.podIP' |xargs curl
    # access via the pod IP

    $ kubectl get svc -n nfs-pv-pvc -o custom-columns=':spec.clusterIP' |xargs curl
    # access via the Service ClusterIP
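
    Both commands should return the content written on the NFS server earlier, e.g.:

    $ kubectl get svc -n nfs-pv-pvc -o custom-columns=':spec.clusterIP' |xargs curl
    2021-7-20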
    

    2.2 Dynamic NFS provisioning

    nfs-client-provisioner

    Option 1: use the resources on GitHub

    # git clone https://github.com/kubernetes-retired/external-storage.git
    
    # cd ~/external-storage/nfs-client/deploy
    

    Option 2: create the resource files manually

    $ mkdir my-nfs-client-provisioner && cd my-nfs-client-provisioner
    
    $ cat > rbac.yaml << EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: default
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: nfs-client-provisioner-runner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "update", "patch"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: run-nfs-client-provisioner
    subjects:
      - kind: ServiceAccount
        name: nfs-client-provisioner
        # replace with namespace where provisioner is deployed
        namespace: default
    roleRef:
      kind: ClusterRole
      name: nfs-client-provisioner-runner
      apiGroup: rbac.authorization.k8s.io
    ---
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: default
    rules:
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: default
    subjects:
      - kind: ServiceAccount
        name: nfs-client-provisioner
        # replace with namespace where provisioner is deployed
        namespace: default
    roleRef:
      kind: Role
      name: leader-locking-nfs-client-provisioner
      apiGroup: rbac.authorization.k8s.io
    EOF
    
    • 1. kind: ServiceAccount: defines the service account the provisioner uses to request resources from the cluster
    • 2. kind: ClusterRole: defines a cluster-wide role (permissions on PVs, PVCs, StorageClasses, and events)
    • 3. kind: ClusterRoleBinding: binds the ClusterRole to the service account
    • 4. kind: Role: a namespaced role (used here for the leader-election lock on endpoints)
    • 5. kind: RoleBinding: binds the Role to the service account
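
    Once rbac.yaml is applied (see the deployment step at the end of this section), the created objects can be spot-checked, for example:

    $ kubectl get serviceaccount nfs-client-provisioner -n default
    $ kubectl get clusterrole,clusterrolebinding | grep nfs-client-provisioner
    $ kubectl get role,rolebinding -n default | grep leader-locking
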
    $ cat > class.yaml << EOF
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: managed-nfs-storage
    provisioner: fuseim.pri/ifs # or choose another name; must match the PROVISIONER_NAME env in the deployment
    parameters:
      archiveOnDelete: "false"
    EOF
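
    Optionally, this class can be made the cluster default so that PVCs which omit a class use it automatically; this uses the standard default-class annotation (an optional step, not part of the original class.yaml):

    $ kubectl annotate storageclass managed-nfs-storage storageclass.kubernetes.io/is-default-class="true"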
    
    $ cat > deployment.yaml << EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nfs-client-provisioner
      labels:
        app: nfs-client-provisioner
      # replace with namespace where provisioner is deployed
      namespace: default
    spec:
      replicas: 1
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: nfs-client-provisioner
      template:
        metadata:
          labels:
            app: nfs-client-provisioner
        spec:
          serviceAccountName: nfs-client-provisioner
          containers:
            - name: nfs-client-provisioner
              image: quay.io/external_storage/nfs-client-provisioner:latest
              volumeMounts:
                - name: nfs-client-root
                  mountPath: /persistentvolumes
              env:
                - name: PROVISIONER_NAME
                  value: fuseim.pri/ifs
                - name: NFS_SERVER
                  value: 192.168.99.204 # change to your NFS server IP
                - name: NFS_PATH
                  value: /data/nfs-volume/
          volumes:
            - name: nfs-client-root
              nfs:
                server: 192.168.99.204 # change to your NFS server IP
                path: /data/nfs-volume
    EOF
    
    • Change the NFS server IP to your own (192.168.99.204 in this example)
    • Change the shared directory to your export path (/data/nfs-volume)
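
    After deployment.yaml is applied, it is worth confirming that the provisioner pod is running and shows no NFS mount errors in its logs, for example:

    $ kubectl get pod -n default -l app=nfs-client-provisioner
    $ kubectl logs -n default deployment/nfs-client-provisioner
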
    $ cat > test-claim.yaml <<EOF
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-claim
      annotations:
        volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1Mi
    EOF
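
    The volume.beta.kubernetes.io/storage-class annotation is the legacy way to select a class; on current clusters the same claim can instead reference the class through spec.storageClassName (an equivalent sketch of test-claim):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-claim
    spec:
      storageClassName: managed-nfs-storage
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1Mi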
    
    $ cat > test-pod.yaml << EOF
    kind: Pod
    apiVersion: v1
    metadata:
      name: test-pod
    spec:
      containers:
      - name: test-pod
        image: busybox:1.24
        command:
          - "/bin/sh"
        args:
          - "-c"
          - "touch /mnt/SUCCESS && exit 0 || exit 1"
        volumeMounts:
          - name: nfs-pvc
            mountPath: "/mnt"
      restartPolicy: "Never"
      volumes:
        - name: nfs-pvc
          persistentVolumeClaim:
            claimName: test-claim
    EOF
    

    Deploy

    $ kubectl apply -f rbac.yaml
    $ kubectl apply -f class.yaml
    $ kubectl apply -f deployment.yaml
    $ kubectl apply -f test-claim.yaml
    $ kubectl apply -f test-pod.yaml
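
    After everything is applied, verify the dynamic provisioning: the PVC should become Bound to an automatically created PV, and the test pod should have written a SUCCESS file into a subdirectory the provisioner created under the NFS export (by default named from the namespace, PVC name, and PV name):

    $ kubectl get storageclass
    $ kubectl get pvc test-claim
    $ kubectl get pv
    $ ls /data/nfs-volume/   # run on the NFS server; the claim's subdirectory should contain the SUCCESS file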
    