• Using Ceph RBD to Provide a StorageClass for Kubernetes


    1. Create a storage pool and enable RBD on it

    ceph osd pool create <poolname> <pg_num> <pgp_num>
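
    For example, on Luminous and later a new pool must also be tagged with the application that will use it before RBD images can be created in it. The PG counts below are illustrative; mypool is the pool name used throughout this post:

    ceph osd pool create mypool 128 128
    ceph osd pool application enable mypool rbd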
    

    2. Create a Ceph user for Kubernetes to use

    [root@ceph-1 ~]# ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=<poolname>'
    
    [client.kube]
    	key = AQCaCZRexOqiGxAAxcbV9jMEIF8Eic133uCqtQ==
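
    To confirm the user and its caps were created as intended, the entry can be inspected on the Ceph side:

    ceph auth get client.kube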
    

    3. Install ceph-common on every Kubernetes node

    # Install the EPEL repository
    yum -y install epel-release
    
    # Configure the Ceph yum repository; /etc/yum.repos.d/ceph.repo should contain:
    cat /etc/yum.repos.d/ceph.repo
    [ceph]
    name=Ceph packages for $basearch
    baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/$basearch
    enabled=1
    priority=2
    gpgcheck=1
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    
    [ceph-noarch]
    name=Ceph noarch packages
    baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch
    enabled=1
    priority=2
    gpgcheck=1
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    
    [ceph-source]
    name=Ceph source packages
    baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
    enabled=0
    priority=2
    gpgcheck=1
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    
    # Install ceph-common
    yum -y install ceph-common
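
    As a quick sanity check that the client tools are in place (the reported version should match the luminous repo configured above):

    ceph --version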
    

    4. Copy ceph.conf and the admin user's keyring to every Kubernetes node (masters and workers alike)

    # Run on the ceph-mon node; adjust the host list to your own k8s node hostnames
    for i in {1..5}; do scp /etc/ceph/{ceph.conf,ceph.client.admin.keyring} ceph-$i:/etc/ceph/; done
    
    # Once this finishes, ceph commands can be run on every Kubernetes node
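
    For example, checking the cluster status from any Kubernetes node confirms that the config and keyring are in place:

    ceph -s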
    

    5. Create Secret resources whose data is the base64-encoded keyring key

    # Get each user's key on ceph-mon, base64-encoded for the Secret's data field
    [root@ceph-1 ~]# ceph auth get-key client.admin |base64
    QVFDRnpvNWVrUFUyRVJBQVFmZ21qTEZGWFNhZFdnUWVzbHZ0L2c9PQ==
    
    [root@ceph-1 ~]# ceph auth get-key client.kube |base64
    QVFBTHo1SmUxNUpKTkJBQThGK0hTMk1rcWZsQXlYcEM4RnU4SXc9PQ==
    
    # On k8s, create a Secret for the ceph admin user
    cat ceph-admin-secret.yaml
    apiVersion: v1
    kind: Secret
    metadata: 
      name: ceph-admin-secret
      namespace: default
    data:
      key: QVFDRnpvNWVrUFUyRVJBQVFmZ21qTEZGWFNhZFdnUWVzbHZ0L2c9PQ==
    type: kubernetes.io/rbd
    
    # On k8s, create a Secret for the ceph kube user
    cat ceph-kube-secret.yaml
    apiVersion: v1
    kind: Secret
    metadata:
      name: ceph-kube-secret
      namespace: default
    data:
      key: QVFBTHo1SmUxNUpKTkJBQThGK0hTMk1rcWZsQXlYcEM4RnU4SXc9PQ==
    type: kubernetes.io/rbd
    
    # Apply both Secrets
    kubectl apply -f ceph-admin-secret.yaml
    kubectl apply -f ceph-kube-secret.yaml
    kubectl get secret
    NAME                                  TYPE                                  DATA   AGE
    ceph-admin-secret                     kubernetes.io/rbd                     1      23h
    ceph-kube-secret                      kubernetes.io/rbd                     1      22h
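
    Alternatively, each Secret can be created in a single command. Note that kubectl base64-encodes the literal itself, so the raw (un-encoded) key is passed here:

    kubectl create secret generic ceph-admin-secret --type="kubernetes.io/rbd" \
      --from-literal=key="$(ceph auth get-key client.admin)"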
    

    6. Create the corresponding RBD image in the Ceph storage pool

    rbd create -p <poolname> -s <size> <imagename>
    
    rbd create -p mypool -s 2G ceph-image
    rbd info ceph-image -p mypool
    rbd image 'ceph-image':
    	size 2GiB in 512 objects
    	order 22 (4MiB objects)
    	block_name_prefix: rbd_data.281756b8b4567
    	format: 2
    	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten   
    	# object-map, fast-diff and deep-flatten must be disabled: the kernel RBD client cannot map images with these features, so the pod would fail to start
    	flags: 
    	create_timestamp: Sun Apr 12 04:48:28 2020
    # Disable the three features above
    rbd feature disable mypool/ceph-image object-map fast-diff deep-flatten
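
    Alternatively, the image can be created with only the layering feature enabled from the start, which makes the disable step unnecessary:

    rbd create -p mypool -s 2G --image-feature layering ceph-image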
    

    Using a static PV (created manually)

    Create the PV

    cat pv.yaml
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: ceph-test-pv
    spec:
      capacity:
        storage: 2Gi			    # requested capacity
      accessModes:
      - ReadWriteOnce
      rbd:
        monitors:
        - ceph-1:6789
        - ceph-2:6789
        - ceph-3:6789
        pool: mypool				# name of the pool created in step 1
        image: ceph-image
        user: admin
        secretRef:         
          name: ceph-admin-secret	 # name of the Secret created in step 5
        fsType: ext4
        readOnly: false
      persistentVolumeReclaimPolicy: Retain		# reclaim policy: Retain, Recycle or Delete
    # Retain (the default for manually created PVs, used here): keep the volume and its data after the claim is released
    # Recycle: scrub the data and make the PV available for a new claim
    # Delete: remove the PV automatically once the PVC releases it
    # Details: https://www.cnblogs.com/Smbands/p/10858145.html
    
    # Apply the PV
    kubectl apply -f pv.yaml
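
    Before a claim binds it, the new PV should report an Available status:

    kubectl get pv ceph-test-pv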
    

    Create the PVC

    cat pvc.yaml
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: ceph-test-pvc
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 2Gi
    # Apply the PVC
    kubectl apply -f pvc.yaml
    [root@k8s-m1 ]# kubectl get pvc
    NAME            STATUS   VOLUME         CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    ceph-test-pvc   Bound    ceph-test-pv   2Gi        RWO            rbd            22h
    
    [root@k8s-m1 ]# kubectl get pv
    NAME           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS   REASON   AGE
    ceph-test-pv   2Gi        RWO            Retain           Bound    default/ceph-test-pvc                           22h
    

    Create the Pod

    cat ceph-test-pod.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: ceph-test-pod
    spec:
      containers:
      - name: ceph-busybox
        image: busybox
        command: ["sleep","60000"]
        volumeMounts:
        - name: ceph-voll
          mountPath: /usr/share/busybox
          readOnly: false
      volumes:
      - name: ceph-voll
        persistentVolumeClaim:
          claimName: ceph-test-pvc			# must match the PVC name above
    # Apply the Pod
    kubectl apply -f ceph-test-pod.yaml
    [root@k8s-m1]# kubectl get pods
    NAME                              READY   STATUS        RESTARTS   AGE
    ceph-test-pod                     1/1     Running       1          24h
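
    The RBD-backed mount can also be checked from inside the running pod, for example:

    kubectl exec ceph-test-pod -- df -h /usr/share/busybox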
    

    Using dynamic PVs

    Create the StorageClass

    [root@k8s-m1]# cat class.yaml 
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: rbd
      annotations:
        storageclass.beta.kubernetes.io/is-default-class: "true"    # mark this as the default StorageClass
    provisioner: kubernetes.io/rbd
    parameters:
      monitors: ceph-1:6789,ceph-2:6789,ceph-3:6789
      adminId: admin
      adminSecretName: ceph-admin-secret
      adminSecretNamespace: default
      pool: mypool
      userId: kube
      userSecretName: ceph-kube-secret
      userSecretNamespace: default
      fsType: ext4
      imageFormat: "2"
      imageFeatures: "layering"
    reclaimPolicy: Retain
    # Apply the StorageClass
    kubectl apply -f class.yaml
    kubectl get sc
    NAME            PROVISIONER         AGE
    rbd (default)   kubernetes.io/rbd   25h
    

    Create the PVC

    cat pvc.yaml
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: ceph-test-pvc
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 2Gi
      storageClassName: rbd
    # Apply the PVC
    kubectl apply -f pvc.yaml
    [root@k8s-m1 ceph]# kubectl get pvc
    NAME            STATUS      VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    ceph-test-pvc   Bound       pvc-85fb37f8-693a-4768-889b-097ee731d15f   2Gi        RWO            rbd            107s
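
    Behind the scenes the provisioner created both a PV and a backing RBD image, which can be inspected with:

    kubectl get pv
    rbd ls -p mypool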
    

    Create the Pod

    cat ceph-test-pod-1.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: ceph-test-pod-1
    spec:
      containers:
      - name: ceph-busybox
        image: busybox
        command: ["sleep","60000"]
        volumeMounts:
        - name: ceph-voll
          mountPath: /usr/share/busybox
          readOnly: false
      volumes:
      - name: ceph-voll
        persistentVolumeClaim:
          claimName: ceph-test-pvc
    # Apply the Pod
    kubectl apply -f ceph-test-pod-1.yaml
    kubectl get pods
    NAME                              READY   STATUS        RESTARTS   AGE
    ceph-test-pod-1                   1/1     Running       0          84s
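
    A quick write/read through the mounted path confirms the dynamically provisioned volume works:

    kubectl exec ceph-test-pod-1 -- sh -c 'echo ok > /usr/share/busybox/test && cat /usr/share/busybox/test'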
    
  • Original post: https://www.cnblogs.com/Smbands/p/12693155.html