• Kubernetes


    1. Preparation

    1.1 Install the GlusterFS client on all nodes

    yum install glusterfs glusterfs-fuse -y
    

    1.2 If the GFS management service is not going to run on every node, label the nodes that should host it

    [root@k8s-master01 ~]# kubectl label node k8s-master01 storagenode=glusterfs
    node/k8s-master01 labeled
    [root@k8s-master01 ~]# kubectl label node k8s-node01 storagenode=glusterfs
    node/k8s-node01 labeled
    [root@k8s-master01 ~]# kubectl label node k8s-node02 storagenode=glusterfs
    node/k8s-node02 labeled
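    
    • The glusterfs-daemonset.json used in the next step schedules pods onto these nodes through a nodeSelector; in the stock Heketi template it looks roughly like the sketch below (field layout may vary by release). Verify the labels before continuing:
    kubectl get nodes -l storagenode=glusterfs
    
    # excerpt from glusterfs-daemonset.json (sketch):
    "nodeSelector": {
      "storagenode": "glusterfs"
    }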
    

    1.3 Load the required kernel modules

    modprobe dm_snapshot
    modprobe dm_mirror
    modprobe dm_thin_pool
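    
    • Optional: modprobe does not survive a reboot; to load these modules automatically at boot, a systemd modules-load drop-in can be used (the file name is arbitrary):
    printf '%s\n' dm_snapshot dm_mirror dm_thin_pool > /etc/modules-load.d/glusterfs.conf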
    

    2. Create the containerized GFS management service cluster

    • This article deploys GFS in containers; if your organization already has a GFS cluster, it can be used directly.
    • GFS is deployed as a DaemonSet, which guarantees that every Node that should host the GFS management service runs exactly one instance of it.
    • Download the related files (and unpack them, as shown below):
    wget https://github.com/heketi/heketi/releases/download/v8.0.0/heketi-client-v8.0.0.linux.amd64.tar.gz
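    
    • Unpack the archive (the target directory is an assumption, chosen to match the paths used later in this article):
    mkdir -p /opt/k8s-cluster
    tar -zxvf heketi-client-v8.0.0.linux.amd64.tar.gz -C /opt/k8s-cluster/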
    

    2.1 Create the cluster

    [root@k8s-master01 kubernetes]# pwd
    /opt/k8s-cluster/heketi-client/share/heketi/kubernetes
    
    • Note 1: this uses the default mount layout; a different disk can be used as the GFS working directory.
    • Note 2: resources are created in the default namespace; change it as needed.
    • Note 3: the gluster/gluster-centos:gluster3u12_centos7 image can be used.
    [root@k8s-master01 kubernetes]# kubectl create -f glusterfs-daemonset.json 
    daemonset.extensions/glusterfs created
    

    2.1.1 Check the pods

    [root@k8s-master01 kubernetes]# kubectl get pods -l glusterfs-node=daemonset
    NAME              READY   STATUS    RESTARTS   AGE
    glusterfs-fvxh7   1/1     Running   0          47m
    glusterfs-jjw7b   1/1     Running   0          47m
    glusterfs-td875   1/1     Running   0          47m
    

    2.2 Create the Heketi service

    • Heketi is a framework that manages GFS volumes through a RESTful API; it enables dynamic storage provisioning on cloud platforms such as Kubernetes, OpenShift, and OpenStack, supports multi-cluster GFS management, and makes GFS easier for administrators to operate.
    • Create the ServiceAccount object for Heketi:
    [root@k8s-master01 kubernetes]# kubectl create -f heketi-service-account.json
    serviceaccount/heketi-service-account created
    
    [root@k8s-master01 kubernetes]# kubectl get sa
    NAME                     SECRETS   AGE
    default                  1         6d22h
    heketi-service-account   1         18s
    
    • Create the RBAC binding and secret for Heketi
    [root@k8s-master01 kubernetes]# kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account
    clusterrolebinding.rbac.authorization.k8s.io/heketi-gluster-admin created
    
    [root@k8s-master01 kubernetes]# kubectl create secret generic heketi-config-secret --from-file=./heketi.json
    secret/heketi-config-secret created
    
    • Bootstrap the Heketi deployment
    [root@k8s-master01 kubernetes]# kubectl create -f heketi-bootstrap.json
    service/deploy-heketi created
    deployment.extensions/deploy-heketi created
    

    2.3 Set up the GFS cluster

    [root@k8s-master01 heketi-client]# pwd
    /opt/k8s-cluster/heketi-client
    
    [root@k8s-master01 heketi-client]# cp bin/heketi-cli /usr/local/bin/
    
    [root@k8s-master01 heketi-client]# heketi-cli -v
    heketi-cli v8.0.0
    

    2.3.1 Edit topology-sample.json: manage is the hostname of a Node running the GFS management service, storage is that Node's IP, and devices lists the raw block devices on the Node

    [root@k8s-master01 kubernetes]# more topology-sample.json
    {
      "clusters": [
        {
          "nodes": [
            {
              "node": {
                "hostnames": {
                  "manage": [
                    "k8s-master01"
                  ],
                  "storage": [
                    "192.168.2.100"
                  ]
                },
                "zone": 1
              },
              "devices": [
                {
                  "name": "/dev/sdb",
                  "destroydata": false
                }
              ]
            },
            {
              "node": {
                "hostnames": {
                  "manage": [
                    "k8s-node01"
                  ],
                  "storage": [
                    "192.168.2.101"
                  ]
                },
                "zone": 1
              },
              "devices": [
                {
                  "name": "/dev/sdc",
                  "destroydata": false
                }
              ]
            },
            {
              "node": {
                "hostnames": {
                  "manage": [
                    "k8s-node02"
                  ],
                  "storage": [
                    "192.168.2.102"
                  ]
                },
                "zone": 1
              },
              "devices": [
                {
                  "name": "/dev/sdb",
                  "destroydata": false
                }
              ]
            }
          ]
        }
      ]
    }
    

    2.3.2 Check the ClusterIP of the deploy-heketi service

    [root@k8s-master01 kubernetes]# kubectl get service | grep heketi
    deploy-heketi   ClusterIP   10.105.64.142   <none>        8080/TCP   27m
    
    [root@k8s-master01 kubernetes]# curl 10.105.64.142:8080/hello
    Hello from Heketi
    
    [root@k8s-master01 kubernetes]# export HEKETI_CLI_SERVER=http://10.105.64.142:8080
    

    2.3.3 Add the storage devices

    [root@k8s-master01 kubernetes]# heketi-cli topology load --json=topology-sample.json
    Creating cluster ... ID: 4aedb5c75b1d84f6b1a329b7c2a93710
    	Allowing file volumes on cluster.
    	Allowing block volumes on cluster.
    	Creating node k8s-master01 ... ID: 5dcfc78aad8217ff4d484b861f686b87
    		Adding device /dev/sdb ... OK
    	Creating node k8s-node01 ... ID: 091f9aa6c7a8371df10891d12c6cecc1
    		Adding device /dev/sdc ... OK
    	Creating node k8s-node02 ... ID: 4cb5c15c92825cbe4179def7a0df54b9
    		Adding device /dev/sdb ... OK
    
    
    • Note: initialize the raw disk (this destroys all data on it; a faster alternative is sketched below)
    dd if=/dev/zero of=/dev/sdb
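    
    • Zeroing the entire disk with dd can take a long time on large devices; when only old filesystem/LVM signatures need to be cleared, a faster sketch (both variants destroy data on the device, so double-check the device name):
    wipefs -a /dev/sdb                            # remove existing signatures
    dd if=/dev/zero of=/dev/sdb bs=1M count=10    # or zero just the first 10 MiB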
    

    2.3.4 Create the Heketi persistent volume

    yum install device-mapper* -y
    
    [root@k8s-master01 kubernetes]# heketi-cli setup-openshift-heketi-storage
    Saving heketi-storage.json
    
    [root@k8s-master01 kubernetes]# kubectl create -f heketi-storage.json
    secret/heketi-storage-secret created
    endpoints/heketi-storage-endpoints created
    service/heketi-storage-endpoints created
    job.batch/heketi-storage-copy-job created
    
    • Error encountered:
    [root@k8s-master01 kubernetes]# heketi-cli setup-openshift-heketi-storage
    Error: /usr/sbin/modprobe failed: 1
      thin: Required device-mapper target(s) not detected in your kernel.
      Run `lvcreate --help' for more information.
      
    # Fix: run modprobe dm_thin_pool on all nodes
    

    2.3.5 Clean up the intermediate artifacts

    [root@k8s-master01 kubernetes]# kubectl delete all,service,jobs,deployment,secret --selector="deploy-heketi"
    pod "deploy-heketi-858f965fd5-68459" deleted
    service "deploy-heketi" deleted
    deployment.apps "deploy-heketi" deleted
    replicaset.apps "deploy-heketi-858f965fd5" deleted
    job.batch "heketi-storage-copy-job" deleted
    secret "heketi-storage-secret" deleted
    

    2.3.6 Deploy the persistent Heketi (other persistence approaches can also be used)

    [root@k8s-master01 kubernetes]# kubectl create -f heketi-deployment.json
    secret/heketi-db-backup created
    service/heketi created
    deployment.extensions/heketi created
    

    2.3.7 View GFS information

    • Check the pods
    [root@k8s-master01 kubernetes]# kubectl get pod
    NAME                              READY   STATUS      RESTARTS   AGE
    glusterfs-2rbd2                   1/1     Running     0          15h
    glusterfs-5dw27                   1/1     Running     0          15h
    glusterfs-fwd7h                   1/1     Running     0          15h
    heketi-754dfc7cdf-c65w4           1/1     Running     0          6m20s
    
    • Check the svc of the newly deployed persistent Heketi
    [root@k8s-master01 kubernetes]# kubectl get service | grep heketi
    heketi                     ClusterIP   10.96.115.101   <none>        8080/TCP   6m42s
    heketi-storage-endpoints   ClusterIP   10.110.142.33   <none>        1/TCP      13m
    
    • Update the value of HEKETI_CLI_SERVER
    [root@k8s-master01 kubernetes]# export HEKETI_CLI_SERVER=http://10.96.115.101:8080
    [root@k8s-master01 kubernetes]# curl $HEKETI_CLI_SERVER/hello
    Hello from Heketi
    
    • View the GFS topology
    [root@k8s-master01 kubernetes]# heketi-cli topology info
    
    Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    
        File:  true
        Block: true
    
        Volumes:
    
    	Name: heketidbstorage
    	Size: 2
    	Id: a236230723d7617001e4fa20d3a96e28
    	Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    	Mount: 192.168.2.101:heketidbstorage
    	Mount Options: backup-volfile-servers=192.168.2.102,192.168.2.100
    	Durability Type: replicate
    	Replica: 3
    	Snapshot: Disabled
    
    		Bricks:
    			Id: 0e66a2c4f7798ce66cd54d235231f60f
    			Path: /var/lib/heketi/mounts/vg_a9a5f6bf3cd3a86b9500980b8c2a3bd7/brick_0e66a2c4f7798ce66cd54d235231f60f/brick
    			Size (GiB): 2
    			Node: 4cb5c15c92825cbe4179def7a0df54b9
    			Device: a9a5f6bf3cd3a86b9500980b8c2a3bd7
    
    			Id: 71f1fc7e2d02a5697a06c28990423f82
    			Path: /var/lib/heketi/mounts/vg_de5f46481b715672c338ca7128bb6fca/brick_71f1fc7e2d02a5697a06c28990423f82/brick
    			Size (GiB): 2
    			Node: 5dcfc78aad8217ff4d484b861f686b87
    			Device: de5f46481b715672c338ca7128bb6fca
    
    			Id: 8f4067bfb6dc9785218a7d2ef615ef17
    			Path: /var/lib/heketi/mounts/vg_38b3ee8f36f32d3e8f4e67115e2b9fc0/brick_8f4067bfb6dc9785218a7d2ef615ef17/brick
    			Size (GiB): 2
    			Node: 091f9aa6c7a8371df10891d12c6cecc1
    			Device: 38b3ee8f36f32d3e8f4e67115e2b9fc0
    
    
        Nodes:
    
    	Node Id: 091f9aa6c7a8371df10891d12c6cecc1
    	State: online
    	Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    	Zone: 1
    	Management Hostnames: k8s-node01
    	Storage Hostnames: 192.168.2.101
    	Devices:
    		Id:38b3ee8f36f32d3e8f4e67115e2b9fc0   Name:/dev/sdc            State:online    Size (GiB):15      Used (GiB):2       Free (GiB):13
    			Bricks:
    				Id:8f4067bfb6dc9785218a7d2ef615ef17   Size (GiB):2       Path: /var/lib/heketi/mounts/vg_38b3ee8f36f32d3e8f4e67115e2b9fc0/brick_8f4067bfb6dc9785218a7d2ef615ef17/brick
    
    	Node Id: 4cb5c15c92825cbe4179def7a0df54b9
    	State: online
    	Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    	Zone: 1
    	Management Hostnames: k8s-node02
    	Storage Hostnames: 192.168.2.102
    	Devices:
    		Id:a9a5f6bf3cd3a86b9500980b8c2a3bd7   Name:/dev/sdb            State:online    Size (GiB):15      Used (GiB):2       Free (GiB):13
    			Bricks:
    				Id:0e66a2c4f7798ce66cd54d235231f60f   Size (GiB):2       Path: /var/lib/heketi/mounts/vg_a9a5f6bf3cd3a86b9500980b8c2a3bd7/brick_0e66a2c4f7798ce66cd54d235231f60f/brick
    
    	Node Id: 5dcfc78aad8217ff4d484b861f686b87
    	State: online
    	Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    	Zone: 1
    	Management Hostnames: k8s-master01
    	Storage Hostnames: 192.168.2.100
    	Devices:
    		Id:de5f46481b715672c338ca7128bb6fca   Name:/dev/sdb            State:online    Size (GiB):15      Used (GiB):2       Free (GiB):13
    			Bricks:
    				Id:71f1fc7e2d02a5697a06c28990423f82   Size (GiB):2       Path: /var/lib/heketi/mounts/vg_de5f46481b715672c338ca7128bb6fca/brick_71f1fc7e2d02a5697a06c28990423f82/brick
    

    2.4 Define the StorageClass

    • The provisioner parameter must be set to "kubernetes.io/glusterfs".
    • resturl is any address of the Heketi service that is reachable from the host where the API Server runs.
    [root@k8s-master01 kubernetes]# cat storageclass-gfs-heketi.yaml
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: gluster-heketi
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://10.96.115.101:8080"
      restauthenabled: "false"
    
    [root@k8s-master01 kubernetes]# kubectl create -f storageclass-gfs-heketi.yaml
    storageclass.storage.k8s.io/gluster-heketi created
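    
    • If use_auth is enabled in heketi.json, the StorageClass must also carry credentials. A sketch under that assumption; the secret name is hypothetical, and the key value must match the admin key in heketi.json:
    kubectl create secret generic heketi-admin-secret --type="kubernetes.io/glusterfs" --from-literal=key='My Secret'
    
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: gluster-heketi-auth
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://10.96.115.101:8080"
      restauthenabled: "true"
      restuser: "admin"
      secretNamespace: "default"
      secretName: "heketi-admin-secret"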
    

    2.5 Define the PVC and a test Pod

    • Define the PVC
    [root@k8s-master01 kubernetes]# cat pod-use-pvc.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: pod-use-pvc
    spec:
      containers:
      - name: pod-use-pvc
        image: busybox
        command:
        - sleep
        - "3600"
        volumeMounts:
        - name: gluster-volume
          mountPath: "/pv-data"
          readOnly: false
      volumes:
      - name: gluster-volume
        persistentVolumeClaim:
          claimName: pvc-gluster-heketi
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: pvc-gluster-heketi
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "gluster-heketi"
      resources:
        requests:
          storage: 1Gi
    
    [root@k8s-master01 kubernetes]#  kubectl create -f pod-use-pvc.yaml
    pod/pod-use-pvc created
    persistentvolumeclaim/pvc-gluster-heketi created
    
    • As soon as the PVC is defined, the system triggers Heketi to perform the corresponding operations, mainly creating bricks on the GlusterFS cluster, then creating and starting a volume.
    • Check the pv and pvc
    [root@k8s-master01 kubernetes]# kubectl get pv,pvc  | grep gluster
    
    persistentvolume/pvc-b21c710b-ff41-11e8-87ec-000c2925b928   1Gi        RWO            Delete           Bound         default/pvc-gluster-heketi   gluster-heketi                          71s
    persistentvolumeclaim/pvc-gluster-heketi   Bound    pvc-b21c710b-ff41-11e8-87ec-000c2925b928   1Gi        RWO            gluster-heketi   106s
    

    3. Test the data

    3.1 Exec into the pod and create files

    [root@k8s-master01 kubernetes]# kubectl exec -ti pod-use-pvc -- /bin/sh
    / # cd /pv-data/
    /pv-data # mkdir 1 2 3
    /pv-data # ls
    1  2  3
    

    3.2 Mount test on the host

    [root@k8s-master01 kubernetes]# heketi-cli topology info
    
    Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    
        File:  true
        Block: true
    
        Volumes:
    
    	Name: vol_7712ca88a6ee8f3f9c616acf9b33e946
    	Size: 1
    	Id: 7712ca88a6ee8f3f9c616acf9b33e946
    	Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    	Mount: 192.168.2.101:vol_7712ca88a6ee8f3f9c616acf9b33e946
    	Mount Options: backup-volfile-servers=192.168.2.102,192.168.2.100
    	Durability Type: replicate
    	Replica: 3
    	Snapshot: Enabled
    ....
    

    3.3 Alternatively, check with volume list

    [root@k8s-master01 kubernetes]# heketi-cli volume list
    Id:7712ca88a6ee8f3f9c616acf9b33e946    Cluster:4aedb5c75b1d84f6b1a329b7c2a93710    Name:vol_7712ca88a6ee8f3f9c616acf9b33e946
    Id:a236230723d7617001e4fa20d3a96e28    Cluster:4aedb5c75b1d84f6b1a329b7c2a93710    Name:heketidbstorage
    

    3.4 Mount information

    Mount: 192.168.2.101:vol_7712ca88a6ee8f3f9c616acf9b33e946
    Volume Name: vol_7712ca88a6ee8f3f9c616acf9b33e946
    

    3.5 Mount

    [root@k8s-master01 kubernetes]# mount -t glusterfs 192.168.2.101:vol_7712ca88a6ee8f3f9c616acf9b33e946 /mnt/
    
    [root@k8s-master01 kubernetes]# ls /mnt/
    1  2  3
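    
    • To keep the mount usable if the volfile server goes down, the backup-volfile-servers option shown in the topology output can be passed at mount time; a sketch:
    mount -t glusterfs -o backup-volfile-servers=192.168.2.102:192.168.2.100 \
        192.168.2.101:vol_7712ca88a6ee8f3f9c616acf9b33e946 /mnt/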
    

    4. Test Deployments

    4.1 Define nginx-gluster

    [root@k8s-master01 kubernetes]# cat nginx-gluster.yaml
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: nginx-gfs
    spec:
      replicas: 2
      template:
        metadata:
          labels:
            name: nginx
        spec:
          containers:
            - name: nginx
              image: nginx
              imagePullPolicy: IfNotPresent
              ports:
                - containerPort: 80
              volumeMounts:
                - name: nginx-gfs-html
                  mountPath: "/usr/share/nginx/html"
                - name: nginx-gfs-conf
                  mountPath: "/etc/nginx/conf.d"
          volumes:
          - name: nginx-gfs-html
            persistentVolumeClaim:
              claimName: glusterfs-nginx-html
          - name: nginx-gfs-conf
            persistentVolumeClaim:
              claimName: glusterfs-nginx-conf
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: glusterfs-nginx-html
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: "gluster-heketi"
      resources:
        requests:
          storage: 500Mi
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: glusterfs-nginx-conf
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: "gluster-heketi"
      resources:
        requests:
          storage: 10Mi
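    
    • Note: the extensions/v1beta1 Deployment API was removed in Kubernetes 1.16; on newer clusters the same manifest needs apps/v1 and an explicit selector, roughly as sketched below (pod template unchanged):
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-gfs
    spec:
      replicas: 2
      selector:
        matchLabels:
          name: nginx
      template:
        # ... pod template as in the manifest above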
    

    4.2 Create nginx-gluster

    [root@k8s-master01 kubernetes]# kubectl create -f nginx-gluster.yaml
    deployment.extensions/nginx-gfs created
    persistentvolumeclaim/glusterfs-nginx-html created
    persistentvolumeclaim/glusterfs-nginx-conf created
    

    4.3 Check the pods

    [root@k8s-master01 kubernetes]# kubectl get po,pvc,pv | grep nginx-gfs
    pod/nginx-gfs-dbf47b7-5sft5           1/1     Running     0          63s
    pod/nginx-gfs-dbf47b7-znlrl           1/1     Running     0          63s
    

    4.4 Check the mounts

    [root@k8s-master01 kubernetes]# kubectl exec -ti nginx-gfs-dbf47b7-5sft5 -- df -Th
    Filesystem                                         Type            Size  Used Avail Use% Mounted on
    overlay                                            overlay          44G  9.4G   35G  22% /
    tmpfs                                              tmpfs           3.9G     0  3.9G   0% /dev
    tmpfs                                              tmpfs           3.9G     0  3.9G   0% /sys/fs/cgroup
    /dev/mapper/centos-root                            xfs              44G  9.4G   35G  22% /etc/hosts
    shm                                                tmpfs            64M     0   64M   0% /dev/shm
    192.168.2.100:vol_11237ec517d6e79fbc5ca193adbaff19 fuse.glusterfs 1014M   33M  982M   4% /etc/nginx/conf.d
    192.168.2.100:vol_67f553b5ce7bbe5360cf4b5c4fdbeda8 fuse.glusterfs 1014M   33M  982M   4% /usr/share/nginx/html
    tmpfs                                              tmpfs           3.9G   12K  3.9G   1% /run/secrets/kubernetes.io/serviceaccount
    tmpfs                                              tmpfs           3.9G     0  3.9G   0% /sys/firmware
    
    [root@k8s-master01 kubernetes]# kubectl exec -ti nginx-gfs-dbf47b7-znlrl -- df -Th
    Filesystem                                         Type            Size  Used Avail Use% Mounted on
    overlay                                            overlay          44G   11G   34G  25% /
    tmpfs                                              tmpfs           3.9G     0  3.9G   0% /dev
    tmpfs                                              tmpfs           3.9G     0  3.9G   0% /sys/fs/cgroup
    /dev/mapper/centos-root                            xfs              44G   11G   34G  25% /etc/hosts
    shm                                                tmpfs            64M     0   64M   0% /dev/shm
    192.168.2.100:vol_11237ec517d6e79fbc5ca193adbaff19 fuse.glusterfs 1014M   33M  982M   4% /etc/nginx/conf.d
    192.168.2.100:vol_67f553b5ce7bbe5360cf4b5c4fdbeda8 fuse.glusterfs 1014M   33M  982M   4% /usr/share/nginx/html
    tmpfs                                              tmpfs           3.9G   12K  3.9G   1% /run/secrets/kubernetes.io/serviceaccount
    tmpfs                                              tmpfs           3.9G     0  3.9G   0% /sys/firmware
    

    4.5 Mount the volume and create index.html

    [root@k8s-master01 kubernetes]# mount -t glusterfs 192.168.2.100:vol_67f553b5ce7bbe5360cf4b5c4fdbeda8 /mnt/
    
    [root@k8s-master01 kubernetes]# ls /mnt/
    
    
    [root@k8s-master01 kubernetes]# echo -e "`date`
    hello ngins-glusterfs" > /mnt/index.html
    
    [root@k8s-master01 kubernetes]# cat /mnt/index.html
    Fri Dec 14 10:07:43 CST 2018
    hello ngins-glusterfs
    
    [root@k8s-master01 kubernetes]# kubectl exec -it nginx-gfs-dbf47b7-znlrl -- cat /usr/share/nginx/html/index.html
    Fri Dec 14 10:07:43 CST 2018
    hello ngins-glusterfs
    

    4.6 Scale out nginx

    • Check the deployment
    [root@k8s-master01 kubernetes]# kubectl get deployment
    NAME        DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
    heketi      1         1         1            1           43m
    nginx-gfs   2         2         2            2           13m
    
    • Change the replica count
    [root@k8s-master01 kubernetes]# kubectl scale deployment nginx-gfs --replicas 3
    deployment.extensions/nginx-gfs scaled
    
    • Check
    [root@k8s-master01 kubernetes]# kubectl get deployment
    NAME        DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
    heketi      1         1         1            1           45m
    nginx-gfs   3         3         3            3           16m
    
    [root@k8s-master01 kubernetes]# kubectl get pod
    NAME                              READY   STATUS      RESTARTS   AGE
    glusterfs-2rbd2                   1/1     Running     0          16h
    glusterfs-5dw27                   1/1     Running     0          16h
    glusterfs-fwd7h                   1/1     Running     0          16h
    heketi-754dfc7cdf-c65w4           1/1     Running     0          45m
    nginx-gfs-dbf47b7-498mr           1/1     Running     0          70s
    nginx-gfs-dbf47b7-5sft5           1/1     Running     0          15m
    nginx-gfs-dbf47b7-znlrl           1/1     Running     0          15m
    pod-use-pvc                       1/1     Running     0          31m
    recycler-for-pv-redis-cluster-2   0/1     Completed   0          15h
    recycler-for-pv-redis-cluster-5   0/1     Completed   0          15h
    
    [root@k8s-master01 kubernetes]# kubectl exec -it nginx-gfs-dbf47b7-498mr -- cat /usr/share/nginx/html/index.html
    Fri Dec 14 10:07:43 CST 2018
    hello ngins-glusterfs
    

    5. Expand GlusterFS

    5.1 Add a disk to the existing node k8s-node02

    5.1.1 Find the name and IP of the pod deployed on k8s-node02

    [root@k8s-master01 kubernetes]# kubectl get pod -o wide -l glusterfs-node
    NAME              READY   STATUS    RESTARTS   AGE   IP              NODE           NOMINATED NODE
    glusterfs-2rbd2   1/1     Running   0          16h   192.168.2.100   k8s-master01   <none>
    glusterfs-5dw27   1/1     Running   0          16h   192.168.2.101   k8s-node01     <none>
    glusterfs-fwd7h   1/1     Running   0          16h   192.168.2.102   k8s-node02     <none>
    

    5.1.2 Confirm the newly added disk on k8s-node02 (output from fdisk -l)

    Disk /dev/sdc: 107.4 GB, 107374182400 bytes, 209715200 sectors
    Units = sectors of 1 * 512 = 512 bytes
    Sector size (logical/physical): 512 bytes / 512 bytes
    I/O size (minimum/optimal): 512 bytes / 512 bytes
    

    5.1.3 Use heketi-cli to view the cluster ID and all node IDs

    [root@k8s-master01 kubernetes]# heketi-cli cluster list
    Clusters:
    Id:4aedb5c75b1d84f6b1a329b7c2a93710 [file][block]
    
    [root@k8s-master01 kubernetes]# heketi-cli cluster info 4aedb5c75b1d84f6b1a329b7c2a93710
    Cluster id: 4aedb5c75b1d84f6b1a329b7c2a93710
    Nodes:
    091f9aa6c7a8371df10891d12c6cecc1
    4cb5c15c92825cbe4179def7a0df54b9
    5dcfc78aad8217ff4d484b861f686b87
    Volumes:
    11237ec517d6e79fbc5ca193adbaff19
    67f553b5ce7bbe5360cf4b5c4fdbeda8
    7712ca88a6ee8f3f9c616acf9b33e946
    a236230723d7617001e4fa20d3a96e28
    Block: true
    File: true
    

    5.1.4 Find the node ID corresponding to k8s-node02

    [root@k8s-master01 kubernetes]# heketi-cli node info 4cb5c15c92825cbe4179def7a0df54b9
    Node Id: 4cb5c15c92825cbe4179def7a0df54b9
    State: online
    Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    Zone: 1
    Management Hostname: k8s-node02
    Storage Hostname: 192.168.2.102
    Devices:
    Id:a9a5f6bf3cd3a86b9500980b8c2a3bd7   Name:/dev/sdb            State:online    Size (GiB):15      Used (GiB):5       Free (GiB):10      Bricks:4
    

    5.1.5 Add the disk to node02 in the GFS cluster

    [root@k8s-master01 kubernetes]# heketi-cli device add --name=/dev/sdc --node=4cb5c15c92825cbe4179def7a0df54b9
    Device added successfully
    

    5.1.6 Check the result

    [root@k8s-master01 kubernetes]# heketi-cli node info 4cb5c15c92825cbe4179def7a0df54b9
    Node Id: 4cb5c15c92825cbe4179def7a0df54b9
    State: online
    Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    Zone: 1
    Management Hostname: k8s-node02
    Storage Hostname: 192.168.2.102
    Devices:
    Id:8132038da0b70cde967c0cb8b80dc487   Name:/dev/sdc            State:online    Size (GiB):99      Used (GiB):0       Free (GiB):99      Bricks:0
    Id:a9a5f6bf3cd3a86b9500980b8c2a3bd7   Name:/dev/sdb            State:online    Size (GiB):15      Used (GiB):5       Free (GiB):10      Bricks:4
    

    5.2 Add a new node

    • Add k8s-node03 (IP 192.168.2.103) to the GlusterFS cluster, then add that node's /dev/sdb device to the cluster

    5.2.1 Apply the label

    [root@k8s-master01 kubernetes]# kubectl label node k8s-node03 storagenode=glusterfs
    node/k8s-node03 labeled
    
    [root@k8s-master01 kubernetes]# kubectl  get pod -owide -l glusterfs-node
    NAME              READY   STATUS    RESTARTS   AGE   IP              NODE           NOMINATED NODE
    glusterfs-2rbd2   1/1     Running   0          17h   192.168.2.100   k8s-master01   <none>
    glusterfs-5dw27   1/1     Running   0          17h   192.168.2.101   k8s-node01     <none>
    glusterfs-fwd7h   1/1     Running   0          17h   192.168.2.102   k8s-node02     <none>
    glusterfs-33d7h   1/1     Running   0          3m   192.168.2.103   k8s-node03     <none>
    

    5.2.2 Run peer probe on any node

    [root@k8s-master01 kubernetes]# kubectl exec -ti glusterfs-33d7h  -- gluster peer probe 192.168.2.103
    peer probe: success.
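    
    • Note: the command above runs inside the new node's own pod and probes its own IP, which is effectively a no-op. A probe is normally issued from a pod on an existing member toward the new node's address (heketi-cli node add in the next step typically handles peering as well); a sketch using one of the existing pods:
    kubectl exec -ti glusterfs-2rbd2 -- gluster peer probe 192.168.2.103
    kubectl exec -ti glusterfs-2rbd2 -- gluster peer status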
    

    5.2.3 Add the new node to the GlusterFS cluster

    [root@k8s-master01 kubernetes]# heketi-cli cluster list
    Clusters:
    Id:4aedb5c75b1d84f6b1a329b7c2a93710 [file][block]
    
    [root@k8s-master01 kubernetes]# heketi-cli node add --zone=1 --cluster=4aedb5c75b1d84f6b1a329b7c2a93710 --management-host-name=k8s-node03 --storage-host-name=192.168.2.103
    Node information:
    Id: 150bc8c458a70310c6137e840619758c
    State: online
    Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710 
    Zone: 1
    Management Hostname: k8s-node03
    Storage Hostname: 192.168.2.103
    

    5.2.4 Add the new node's disk to the cluster

    [root@k8s-master01 kubernetes]# heketi-cli device add --name=/dev/sdb --node=150bc8c458a70310c6137e840619758c
    Device added successfully
    

    5.2.5 Verify

    [root@k8s-master01 kubernetes]# heketi-cli node list
    Id:091f9aa6c7a8371df10891d12c6cecc1	Cluster:4aedb5c75b1d84f6b1a329b7c2a93710
    Id:4cb5c15c92825cbe4179def7a0df54b9	Cluster:4aedb5c75b1d84f6b1a329b7c2a93710
    Id:5dcfc78aad8217ff4d484b861f686b87	Cluster:4aedb5c75b1d84f6b1a329b7c2a93710
    Id:150bc8c458a70310c6137e840619758c     Cluster:4aedb5c75b1d84f6b1a329b7c2a93710
    
    [root@k8s-master01 kubernetes]# heketi-cli node info 150bc8c458a70310c6137e840619758c
    Node Id: 150bc8c458a70310c6137e840619758c
    State: online
    Cluster Id: 4aedb5c75b1d84f6b1a329b7c2a93710
    Zone: 1
    Management Hostname: k8s-node03
    Storage Hostname: 192.168.2.103
    Devices:
    Id:2d5210c19858fb7ea3f805e6f582ecce   Name:/dev/sdb            State:online    Size (GiB):39      Used (GiB):0       Free (GiB):39      Bricks:0 
    
    • Note: to grow a volume, use heketi-cli volume expand --volume=volumeID --expand-size=10 (size in GiB), as in the example below.
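    • For example, growing the 1Gi test volume created earlier by 10GiB, using its ID from heketi-cli volume list:
    heketi-cli volume expand --volume=7712ca88a6ee8f3f9c616acf9b33e946 --expand-size=10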

    6. Resolving the error when restarting Heketi

    6.1 The error:

    [heketi] ERROR 2018/12/14 11:57:51 heketi/apps/glusterfs/app.go:185:glusterfs.NewApp: Heketi was terminated while performing one or more operations. Server may refuse to start as long as pending operations are present in the db.
    

    6.2 Fix

    • Edit heketi.json and add "brick_min_size_gb": 1,
    [root@k8s-master01 kubernetes]# cat heketi.json 
    {
      "_port_comment": "Heketi Server Port Number",
      "port": "8080",
    
      "_use_auth": "Enable JWT authorization. Please enable for deployment",
      "use_auth": false,
    
      "_jwt": "Private keys for access",
      "jwt": {
        "_admin": "Admin has access to all APIs",
        "admin": {
          "key": "My Secret"
        },
        "_user": "User only has access to /volumes endpoint",
        "user": {
          "key": "My Secret"
        }
      },
    
      "_glusterfs_comment": "GlusterFS Configuration",
      "glusterfs": {
        "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
        "executor": "kubernetes",
    
        "_db_comment": "Database file name",
        "db": "/var/lib/heketi/heketi.db",
        "brick_min_size_gb" : 1,
    
        "kubeexec": {
          "rebalance_on_expansion": true
        },
    
        "sshexec": {
          "rebalance_on_expansion": true,
          "keyfile": "/etc/heketi/private_key",
          "fstab": "/etc/fstab",
          "port": "22",
          "user": "root",
          "sudo": false
        }
      },
    
      "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
      "backup_db_to_kube_secret": false
    }
    

    6.3 Delete and recreate the secret

    [root@k8s-master01 kubernetes]# kubectl delete secret heketi-config-secret
    [root@k8s-master01 kubernetes]# kubectl create secret generic heketi-config-secret --from-file heketi.json
    

    6.4 Update the Heketi deployment

    # Add the following variable under env
    - name: HEKETI_IGNORE_STALE_OPERATIONS
      value: "true"
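    
    # Equivalently, inject the variable without editing the manifest by hand:
    kubectl set env deployment/heketi HEKETI_IGNORE_STALE_OPERATIONS=true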
    

    7. GFS container fails to start

    glusterd.service - GlusterFS, a clustered file-system server
       Loaded: loaded (/usr/lib/systemd/system/glusterd.service; enabled; vendor preset: disabled)
    
    • Fix (for a brand-new cluster with no data):
    rm -rf /var/lib/heketi/
    rm -rf /var/lib/glusterd
    rm -rf /etc/glusterfs/
    yum remove glusterfs -y
    yum install glusterfs glusterfs-fuse -y
    