Using Ceph for Dynamic Persistent Volume Storage in Kubernetes


    Using Ceph storage with k8s

    Ceph provides the underlying storage. Mounted as CephFS it supports all three k8s PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports two (ReadWriteOnce, ReadOnlyMany).

    Dynamic provisioning means PVs are created for you automatically, sized to whatever is requested. k8s creates the PV itself: creating a PVC triggers an API call into the storage class, which provisions a matching PV.

    With static provisioning, we have to create PVs by hand, and if resources run short and no suitable PV can be found, the pod sits in the Pending state. Dynamic provisioning is realized mainly through the StorageClass object: it declares which storage backend to use, connects to it, and then creates PVs automatically.
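
    To see the moving parts at a glance before the detailed walkthrough: a PVC simply names a StorageClass, and the provisioner registered for that class creates a matching PV on demand. A minimal sketch (the class name fast-rbd is a placeholder, not an object defined in this article):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: demo-claim
    spec:
      accessModes:
        - ReadWriteOnce
      storageClassName: fast-rbd   # the provisioner behind this class creates the PV
      resources:
        requests:
          storage: 1Gi             # the requested size becomes the PV size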

    Using Ceph RBD as a persistent data volume
    Configure rbd-provisioner
    1. Write the yaml file

    [root@k8s-master ~]# cat >external-storage-rbd-provisioner.yaml<<EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: rbd-provisioner
      namespace: kube-system
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: rbd-provisioner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "update", "patch"]
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
      - apiGroups: [""]
        resources: ["services"]
        resourceNames: ["kube-dns"]
        verbs: ["list", "get"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: rbd-provisioner
    subjects:
      - kind: ServiceAccount
        name: rbd-provisioner
        namespace: kube-system
    roleRef:
      kind: ClusterRole
      name: rbd-provisioner
      apiGroup: rbac.authorization.k8s.io
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: rbd-provisioner
      namespace: kube-system
    rules:
      - apiGroups: [""]
        resources: ["secrets"]
        verbs: ["get"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: rbd-provisioner
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: rbd-provisioner
    subjects:
      - kind: ServiceAccount
        name: rbd-provisioner
        namespace: kube-system
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: rbd-provisioner
      namespace: kube-system
    spec:
      selector:
        matchLabels:
          app: rbd-provisioner
      replicas: 1
      strategy:
        type: Recreate
      template:
        metadata:
          labels:
            app: rbd-provisioner
        spec:
          containers:
            - name: rbd-provisioner
              image: "registry.cn-chengdu.aliyuncs.com/ives/rbd-provisioner:v2.0.0-k8s1.11"
              env:
                - name: PROVISIONER_NAME
                  value: ceph.com/rbd
          serviceAccount: rbd-provisioner
    EOF

    2. Create the resources

    [root@k8s-master ~]# kubectl apply -f external-storage-rbd-provisioner.yaml

    [root@k8s-master ~]# kubectl get pods -n kube-system |grep rbd
    rbd-provisioner-7c77dcfd67-9xv2m 1/1 Running 0 59s

    Configure the storageclass

    When a pod is created, kubelet uses the rbd command to detect and map the Ceph image behind the PV, so the Ceph client package ceph-common must be installed on every k8s worker node. The ceph.client.admin.keyring and ceph.conf files also need to be copied to /etc/ceph on the master (a sketch of that copy follows step 1).

    1. Install ceph-common (on all k8s worker nodes)

    # yum -y install ceph-common
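
    The note above also calls for copying the cluster config and admin keyring to the master. A hedged sketch, assuming the Ceph admin node is reachable by the hostname ceph_node1 (an assumption of this sketch):

    # run on the k8s master; ceph_node1 is a placeholder hostname
    [root@k8s-master ~]# mkdir -p /etc/ceph
    [root@k8s-master ~]# scp root@ceph_node1:/etc/ceph/ceph.conf /etc/ceph/
    [root@k8s-master ~]# scp root@ceph_node1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/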
    2. Create an osd pool, on a Ceph mon or admin node

    [root@ceph_node1 ~]# ceph osd pool create kube 8
    pool 'kube' created

    [root@ceph_node1 ~]# ceph osd pool ls
    kube
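
    On Ceph Luminous and later, a newly created pool should also be tagged with the application that will use it, otherwise the cluster raises a health warning. A small, hedged addition on the same node:

    # associate the pool with the rbd application (Luminous and later)
    [root@ceph_node1 ~]# ceph osd pool application enable kube rbd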

    3. Create a user for k8s to access Ceph, on a Ceph mon or admin node

    [root@ceph_node1 ~]# ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
    4. View the keys, on a Ceph mon or admin node

    [root@ceph_node1 ~]# ceph auth get-key client.admin
    AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg==

    [root@ceph_node1 ~]# ceph auth get-key client.kube
    AQC5+vJehk7XIRAAr9mtGFHlUSfT7yQMANeWaw==

    5. Create the admin secret, on the k8s management node

    # Replace CEPH_ADMIN_SECRET with the key obtained for client.admin
    [root@k8s-master ~]# export CEPH_ADMIN_SECRET='AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg=='

    [root@k8s-master ~]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
    --from-literal=key=$CEPH_ADMIN_SECRET \
    --namespace=kube-system

    6. Create the secret PVCs will use to access Ceph, in the default namespace, on the k8s management node

    # Replace CEPH_KUBE_SECRET with the key obtained for client.kube
    [root@k8s-master ~]# export CEPH_KUBE_SECRET='AQC5+vJehk7XIRAAr9mtGFHlUSfT7yQMANeWaw=='

    [root@k8s-master ~]# kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
    --from-literal=key=$CEPH_KUBE_SECRET \
    --namespace=default

    7. View the secrets

    [root@k8s-master ~]# kubectl get secret ceph-user-secret -o yaml
    [root@k8s-master ~]# kubectl get secret ceph-secret -n kube-system -o yaml

    8. Configure the StorageClass

    [root@k8s-master ~]# cat >storageclass-ceph-rdb.yaml<<EOF
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: dynamic-ceph-rdb
    provisioner: ceph.com/rbd
    parameters:
      monitors: 192.168.3.27:6789,192.168.3.60:6789,192.168.3.95:6789
      adminId: admin
      adminSecretName: ceph-secret
      adminSecretNamespace: kube-system
      pool: kube
      userId: kube
      userSecretName: ceph-user-secret
      fsType: ext4
      imageFormat: "2"
      imageFeatures: "layering"
    EOF
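
    Note that userSecretName has no accompanying namespace parameter: the provisioner looks that secret up in the namespace of the PVC being provisioned, which is why step 6 created ceph-user-secret in default, where the test PVCs below live.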

    9. Create the StorageClass

    [root@k8s-master ~]# kubectl apply -f storageclass-ceph-rdb.yaml
    10. View it

    [root@k8s-master ~]# kubectl get sc


    Testing
    1. Create a test PVC

    [root@k8s-master ~]# cat >ceph-rdb-pvc-test.yaml<<EOF
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: ceph-rdb-claim
    spec:
      accessModes:
        - ReadWriteOnce
      storageClassName: dynamic-ceph-rdb
      resources:
        requests:
          storage: 2Gi
    EOF

    [root@k8s-master ~]# kubectl apply -f ceph-rdb-pvc-test.yaml
    persistentvolumeclaim/ceph-rdb-claim created

    2. View the pvc and pv

    [root@k8s-master ~]# kubectl get pvc
    NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
    ceph-rdb-claim Bound pvc-bd2363f1-a841-46d0-ad54-99267173bc04 2Gi RWO dynamic-ceph-rdb 16s

    [root@k8s-master ~]# kubectl get pv
    NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
    pvc-bd2363f1-a841-46d0-ad54-99267173bc04 2Gi RWO Delete Bound default/ceph-rdb-claim dynamic-ceph-rdb 29s
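
    To confirm the claim is really backed by Ceph, you can list the images in the kube pool on a Ceph node; the provisioner generates the image name, so yours will differ. A hedged check:

    # each dynamically provisioned PV corresponds to one RBD image in the pool
    [root@ceph_node1 ~]# rbd ls -p kube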

    3. Write an nginx pod manifest for testing

    [root@k8s-master ~]# cat >nginx-pod.yaml<<EOF
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx-pod1
      labels:
        name: nginx-pod1
    spec:
      containers:
        - name: nginx-pod1
          image: nginx:alpine
          ports:
            - name: web
              containerPort: 80
          volumeMounts:
            - name: ceph-rdb
              mountPath: /usr/share/nginx/html
      volumes:
        - name: ceph-rdb
          persistentVolumeClaim:
            claimName: ceph-rdb-claim
    EOF

    4. Create the pod and check it

    [root@k8s-master ~]# kubectl apply -f nginx-pod.yaml
    pod/nginx-pod1 created

    [root@k8s-master ~]# kubectl get pods
    NAME READY STATUS RESTARTS AGE
    nginx-pod1 1/1 Running 0 2m25s

    [root@k8s-master ~]# kubectl get pods -o wide
    NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
    nginx-pod1 1/1 Running 0 2m34s 10.244.1.5 k8s-node1 <none> <none>

    5. Modify the file content

    [root@k8s-master ~]# kubectl exec -it nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'
    6. Test access

    [root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod1 |awk '{print $(NF-3)}')
    [root@k8s-master ~]# curl $POD_IP
    Hello World from Ceph RBD!!!
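
    To verify that the data actually lives on the RBD volume rather than inside the container, a quick hedged check: delete and recreate the pod, then fetch the page again (the pod IP may change, so it is re-read):

    [root@k8s-master ~]# kubectl delete pod nginx-pod1
    [root@k8s-master ~]# kubectl apply -f nginx-pod.yaml
    [root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod1 |awk '{print $(NF-3)}')
    [root@k8s-master ~]# curl $POD_IP   # should still return the Hello World page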

    7. Clean up

    [root@k8s-master ~]# kubectl delete -f nginx-pod.yaml

    [root@k8s-master ~]# kubectl delete -f ceph-rdb-pvc-test.yaml

    Using CephFS as a persistent data volume
    Create the CephFS pools on the Ceph side
    1. Create two pools, one for data and one for metadata, on a Ceph mon or admin node (this is a test setup, so only 8 pg_num are used)

    [root@ceph_node1 ~]# ceph osd pool create fs_data 8
    pool 'fs_data' created
    [root@ceph_node1 ~]# ceph osd pool create fs_metadata 8
    pool 'fs_metadata' created

    2. Create a CephFS, on a Ceph mon or admin node

    [root@ceph_node1 ~]# ceph fs new cephfs fs_metadata fs_data
    new fs with metadata pool 8 and data pool 7

    3. View it

    [root@ceph_node1 ~]# ceph fs ls
    name: cephfs, metadata pool: fs_metadata, data pools: [fs_data ]
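
    CephFS also needs at least one running MDS daemon to serve the filesystem; if mounts later hang, this is the first thing to check. A quick status query on the same node:

    [root@ceph_node1 ~]# ceph mds stat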

    Configure cephfs-provisioner

    Kubernetes does not ship dynamic volume support for CephFS, so the community-maintained cephfs-provisioner is used.

    1. Write the yaml file

    [root@k8s-master ~]# cat >external-storage-cephfs-provisioner.yaml<<EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: cephfs-provisioner
      namespace: kube-system
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: cephfs-provisioner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "update", "patch"]
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
      - apiGroups: [""]
        resources: ["secrets"]
        verbs: ["create", "get", "delete"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: cephfs-provisioner
    subjects:
      - kind: ServiceAccount
        name: cephfs-provisioner
        namespace: kube-system
    roleRef:
      kind: ClusterRole
      name: cephfs-provisioner
      apiGroup: rbac.authorization.k8s.io
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: cephfs-provisioner
      namespace: kube-system
    rules:
      - apiGroups: [""]
        resources: ["secrets"]
        verbs: ["create", "get", "delete"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: cephfs-provisioner
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: cephfs-provisioner
    subjects:
      - kind: ServiceAccount
        name: cephfs-provisioner
        namespace: kube-system
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: cephfs-provisioner
      namespace: kube-system
    spec:
      selector:
        matchLabels:
          app: cephfs-provisioner
      replicas: 1
      strategy:
        type: Recreate
      template:
        metadata:
          labels:
            app: cephfs-provisioner
        spec:
          containers:
            - name: cephfs-provisioner
              image: "registry.cn-chengdu.aliyuncs.com/ives/cephfs-provisioner:latest"
              env:
                - name: PROVISIONER_NAME
                  value: ceph.com/cephfs
              command:
                - "/usr/local/bin/cephfs-provisioner"
              args:
                - "-id=cephfs-provisioner-1"
          serviceAccount: cephfs-provisioner
    EOF

    2. Create the resources

    [root@k8s-master ~]# kubectl apply -f external-storage-cephfs-provisioner.yaml

    [root@k8s-master ~]# kubectl get pods -n kube-system |grep cephfs
    cephfs-provisioner-6d76ff6bd5-zzlmt 1/1 Running 0 28s


    Configure the storageclass
    1. View the key, on a Ceph mon or admin node

    [root@ceph_node1 ~]# ceph auth get-key client.admin
    AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg==
    2. Create the admin secret, on the k8s management node (if ceph-secret already exists from the RBD section above, this step can be skipped)

    # Replace CEPH_ADMIN_SECRET with the key obtained for client.admin
    [root@k8s-master ~]# export CEPH_ADMIN_SECRET='AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg=='

    [root@k8s-master ~]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
    --from-literal=key=$CEPH_ADMIN_SECRET \
    --namespace=kube-system
    3. View the secret

    [root@k8s-master ~]# kubectl get secret ceph-secret -n kube-system -o yaml
    4. Configure the StorageClass

    [root@k8s-master ~]# cat >storageclass-cephfs.yaml<<EOF
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: dynamic-cephfs
    provisioner: ceph.com/cephfs
    parameters:
      monitors: 192.168.3.27:6789,192.168.3.60:6789,192.168.3.95:6789
      adminId: admin
      adminSecretName: ceph-secret
      adminSecretNamespace: "kube-system"
      claimRoot: /volumes/kubernetes
    EOF
    5. Create the StorageClass

    [root@k8s-master ~]# kubectl apply -f storageclass-cephfs.yaml
    storageclass.storage.k8s.io/dynamic-cephfs created
    6. View it

    [root@k8s-master ~]# kubectl get sc
    NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
    dynamic-cephfs ceph.com/cephfs Delete Immediate false 17s


    Testing
    1. Create a test PVC

    [root@k8s-master ~]# cat >cephfs-pvc-test.yaml<<EOF
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: cephfs-claim
    spec:
      accessModes:
        - ReadWriteMany
      storageClassName: dynamic-cephfs
      resources:
        requests:
          storage: 2Gi
    EOF

    [root@k8s-master ~]# kubectl apply -f cephfs-pvc-test.yaml
    persistentvolumeclaim/cephfs-claim created

    2. View the pv and pvc

    [root@k8s-master ~]# kubectl get pvc
    NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
    cephfs-claim Bound pvc-b8194840-2664-418c-bad1-df1a4b028f30 2Gi RWX dynamic-cephfs 3s

    [root@k8s-master ~]# kubectl get pv |grep pvc
    pvc-b8194840-2664-418c-bad1-df1a4b028f30 2Gi RWX Delete Bound default/cephfs-claim dynamic-cephfs 33s

    3. Write an nginx pod manifest for testing

    [root@k8s-master ~]# cat >nginx-pod.yaml<<EOF
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx-pod2
      labels:
        name: nginx-pod2
    spec:
      containers:
        - name: nginx-pod2
          image: nginx
          ports:
            - name: web
              containerPort: 80
          volumeMounts:
            - name: cephfs
              mountPath: /usr/share/nginx/html
      volumes:
        - name: cephfs
          persistentVolumeClaim:
            claimName: cephfs-claim
    EOF

    4. Create the pod and check it

    [root@k8s-master ~]# kubectl apply -f nginx-pod.yaml
    pod/nginx-pod2 created

    [root@k8s-master ~]# kubectl get pods
    NAME READY STATUS RESTARTS AGE
    nginx-pod2 1/1 Running 0 16s

    [root@k8s-master ~]# kubectl get pods -o wide
    NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
    nginx-pod2 1/1 Running 0 88s 10.244.1.7 k8s-node1 <none> <none>

    5. Modify the file content

    [root@k8s-master ~]# kubectl exec -it nginx-pod2 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'
    6. Test access

    [root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod2 |awk '{print $(NF-3)}')

    [root@k8s-master ~]# curl $POD_IP
    Hello World from CephFS!!!
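
    Because this claim is ReadWriteMany, the same PVC can be mounted by a second pod at the same time, which is the key difference from the RBD test earlier. A hedged sketch (nginx-pod3 is a name introduced here purely for illustration):

    [root@k8s-master ~]# cat >nginx-pod3.yaml<<EOF
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx-pod3
    spec:
      containers:
        - name: nginx-pod3
          image: nginx
          volumeMounts:
            - name: cephfs
              mountPath: /usr/share/nginx/html
      volumes:
        - name: cephfs
          persistentVolumeClaim:
            claimName: cephfs-claim   # the same claim nginx-pod2 uses
    EOF
    [root@k8s-master ~]# kubectl apply -f nginx-pod3.yaml
    [root@k8s-master ~]# curl $(kubectl get pod nginx-pod3 -o wide --no-headers |awk '{print $6}')   # same page, served from the shared volume
    [root@k8s-master ~]# kubectl delete -f nginx-pod3.yaml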
    7. Clean up

    [root@k8s-master ~]# kubectl delete -f nginx-pod.yaml

    [root@k8s-master ~]# kubectl delete -f cephfs-pvc-test.yaml
