GlusterFS is an open-source distributed file system with strong scale-out capability: it can grow to several petabytes of storage and serve thousands of clients. GlusterFS aggregates physically distributed storage resources over TCP/IP or InfiniBand RDMA networks and manages the data under a single global namespace.
Heketi (https://github.com/heketi/heketi) is a RESTful-API-based volume management framework for GlusterFS. It integrates easily with cloud platforms, exposing a RESTful API that Kubernetes can call to manage volumes across multiple GlusterFS clusters. Heketi also takes care of spreading bricks and their replicas across different availability zones within a cluster.
Environment
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master,node 44d v1.13.7
k8s-node1 Ready master,node 44d v1.13.7
k8s-node2 Ready master,node 44d v1.13.7
Each of the three machines has an additional disk, /dev/sdb.
Label the nodes that will run GlusterFS. Since all three of my machines will run it, I simply label them all:
kubectl label nodes --all storagenode=glusterfs
Install the GlusterFS client package and load the required kernel modules on every machine:
yum install -y glusterfs-client
modprobe dm_snapshot
modprobe dm_mirror
modprobe dm_thin_pool
To load these modules automatically at boot, create a modules file:
[root@kube-node1 ~]# vi /etc/sysconfig/modules/glusterfs.modules
#!/bin/bash
# Load the device-mapper modules required by GlusterFS thin provisioning, if present on this kernel.
for kernel_module in dm_snapshot dm_mirror dm_thin_pool; do
    if /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1; then
        /sbin/modprobe ${kernel_module}
    fi
done
[root@kube-node1 ~]# chmod +x /etc/sysconfig/modules/glusterfs.modules
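Confirm the modules are loaded:
lsmod | egrep 'dm_snapshot|dm_mirror|dm_thin_pool'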
Deploy GlusterFS
git clone https://github.com/gluster/gluster-kubernetes.git
Edit the topology configuration file so the hostnames, IPs, and disks match your nodes
cd gluster-kubernetes/deploy
cp topology.json.sample topology.json
cat topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "k8s-master"        ### node hostname
              ],
              "storage": [
                "192.168.4.180"     ### node ip
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"              ### node disk
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "k8s-node1"
              ],
              "storage": [
                "192.168.4.104"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "k8s-node2"
              ],
              "storage": [
                "192.168.4.227"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        }
      ]
    }
  ]
}
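JSON does not allow comments, so remove the ### annotations above before using the file. You can then sanity-check it with Python's bundled json.tool (assuming python is available on the host):
python -m json.tool topology.json > /dev/null && echo "topology.json is valid JSON"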
With kubectl 1.15.3 (installed via kubeadm), `kubectl get pod` no longer supports the `--show-all` flag, so the gk-deploy script needs a small patch:
change `heketi_pod=$(${CLI} get pod --no-headers --show-all --selector="heketi" | awk '{print $1}')` to `heketi_pod=$(${CLI} get pod --no-headers -A --selector="heketi" | awk '{print $2}')` (with `-A` the first column becomes the namespace, so the pod name moves to column 2).
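A minimal sketch of applying that edit with sed (assuming the `heketi_pod=` assignment appears on a single line in gk-deploy):
sed -i '/heketi_pod=/ s/--show-all/-A/; /heketi_pod=/ s/{print $1}/{print $2}/' gk-deploy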
Initialize the Gluster cluster and Heketi
kubectl create ns glusterfs
./gk-deploy -g --admin-key adminkey --user-key userkey -y -n glusterfs
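Deployment takes a few minutes; you can watch the glusterfs DaemonSet pods and the heketi pod come up with:
kubectl get pods -n glusterfs -w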
Tear down the Gluster cluster (the same steps apply if the installation ran into problems and you need to reinstall):
./gk-deploy --abort --admin-key adminkey --user-key userkey -y -n glusterfs
kubectl delete ns glusterfs
Then run the following cleanup on every GlusterFS node:
dmsetup ls
dmsetup remove_all
rm -rf /var/log/glusterfs/
rm -rf /var/lib/heketi
rm -rf /var/lib/glusterd/
rm -rf /etc/glusterfs/
dd if=/dev/zero of=/dev/sdb bs=512k count=1 # replace /dev/sdb with the disk you configured
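If dd alone leaves stale LVM or filesystem signatures behind, wipefs can clear those as well (again, substitute your actual disk):
wipefs -a /dev/sdb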
Download the Heketi client
wget https://github.com/heketi/heketi/releases/download/v9.0.0/heketi-client-v9.0.0.linux.amd64.tar.gz
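Unpack it and put the binary on the PATH (assuming the archive unpacks to a heketi-client/ directory, which is the usual layout for these releases):
tar xzf heketi-client-v9.0.0.linux.amd64.tar.gz
cp heketi-client/bin/heketi-cli /usr/local/bin/
heketi-cli --version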
Heketi operations
Export the Heketi server address
[root@k8s-master ~]# export HEKETI_CLI_SERVER=http://$( kubectl get svc heketi -n glusterfs -o go-template='{{.spec.clusterIP}}'):8080
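A quick reachability check against Heketi's /hello endpoint:
[root@k8s-master ~]# curl $HEKETI_CLI_SERVER/hello
Hello from Heketi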
View the topology
[root@k8s-master ~]# heketi-cli --user admin --secret adminkey topology info
List volumes
[root@k8s-master ~]# heketi-cli --user admin --secret adminkey volume list
Id:6a20563884e39264d7111331ad641dee Cluster:375e1a7040cafe02c5da11b633a0bee8 Name:heketidbstorage
Show volume details
[root@k8s-master ~]# heketi-cli --user admin --secret adminkey volume info 6a20563884e39264d7111331ad641dee
Name: heketidbstorage
Size: 2
Volume Id: 6a20563884e39264d7111331ad641dee
Cluster Id: 375e1a7040cafe02c5da11b633a0bee8
Mount: 192.168.4.227:heketidbstorage
Mount Options: backup-volfile-servers=192.168.4.180,192.168.4.104
Block: false
Free Size: 0
Reserved Size: 0
Block Hosting Restriction: (none)
Block Volumes: []
Durability Type: replicate
Distribute Count: 1
Replica Count: 3
Create a volume and generate its PersistentVolume manifest (referencing the heketi-storage-endpoints endpoints)
[root@k8s-master ~]# heketi-cli volume create --user admin --secret adminkey --size=5 \
    --persistent-volume \
    --persistent-volume-endpoint=heketi-storage-endpoints > heketi-storage-endpoints.yaml
[root@k8s-master ~]# cat heketi-storage-endpoints.yaml
{
  "kind": "PersistentVolume",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs-ae93a2ad",
    "creationTimestamp": null,
    "annotations": {
      "pv.beta.kubernetes.io/gid": "0"
    }
  },
  "spec": {
    "capacity": {
      "storage": "5Gi"
    },
    "glusterfs": {
      "endpoints": "heketi-storage-endpoints",
      "path": "vol_ae93a2ad69e4536f946fc1d7ccb30ba1"
    },
    "accessModes": [
      "ReadWriteMany"
    ],
    "persistentVolumeReclaimPolicy": "Retain"
  },
  "status": {}
}
Create the PV
[root@k8s-master ~]# kubectl apply -f heketi-storage-endpoints.yaml
[root@k8s-master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
glusterfs-ae93a2ad 5Gi RWX Retain Available 12s
Check the Service and Endpoints objects
[root@k8s-master ~]# kubectl get svc,ep
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/heketi ClusterIP 10.254.253.62 <none> 8080/TCP 52m
service/heketi-storage-endpoints ClusterIP 10.254.5.159 <none> 1/TCP 53m
service/kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 46d
service/nginx-ingress-nginx-ingress ClusterIP 10.254.9.197 <none> 80/TCP,443/TCP 6d5h
NAME ENDPOINTS AGE
endpoints/heketi 172.30.208.2:8080 52m
endpoints/heketi-storage-endpoints 192.168.4.104:1,192.168.4.180:1,192.168.4.227:1 53m
endpoints/kubernetes 192.168.4.104:6443,192.168.4.180:6443,192.168.4.227:6443 46d
endpoints/nginx-ingress-nginx-ingress 192.168.4.180:80,192.168.4.180:443 6d5h
Check the PVC
[root@k8s-master kube-templates]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
gluster-s3-claim Bound pvc-327cd12f-c3ec-11e9-b71f-000c29b732e9 5Gi RWX gluster-heketi 50s
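The manifest for this claim is not shown above; a minimal sketch that would produce it (assuming a StorageClass named gluster-heketi has been created):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-s3-claim
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: gluster-heketi   # hypothetical name matching the output above
  resources:
    requests:
      storage: 5Gi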
Check the busybox application manifest
[root@k8s-master ~]# cat app.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
spec:
  containers:
  - image: busybox
    command:
    - sleep
    - "3600"
    name: busybox
    volumeMounts:
    - mountPath: /usr/share/busybox
      name: mypvc
  volumes:
  - name: mypvc
    persistentVolumeClaim:
      claimName: gluster-s3-claim
Create the application
[root@k8s-master ~]# kubectl apply -f app.yaml
pod/busybox created
[root@k8s-master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 4m27s
glusterfs-klbqw 1/1 Running 0 178m
glusterfs-md65v 1/1 Running 0 178m
glusterfs-tfzf2 1/1 Running 0 178m
heketi-69bcb5fb6b-jr9sv 1/1 Running 0 170m
nginx-ingress-nginx-ingress-7764698b49-sn94f 1/1 Running 6 6d7h
[root@k8s-master ~]# kubectl exec -it busybox -- sh
/ # df -h
Filesystem Size Used Available Use% Mounted on
overlay 17.0G 13.9G 3.1G 82% /
tmpfs 64.0M 0 64.0M 0% /dev
tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
/dev/mapper/centos-root
17.0G 13.9G 3.1G 82% /dev/termination-log
/dev/mapper/centos-root
17.0G 13.9G 3.1G 82% /etc/resolv.conf
/dev/mapper/centos-root
17.0G 13.9G 3.1G 82% /etc/hostname
/dev/mapper/centos-root
17.0G 13.9G 3.1G 82% /etc/hosts
shm 64.0M 0 64.0M 0% /dev/shm
192.168.4.104:vol_1fb8fc67ded41f0ad8a64aa3730e8635
5.0G 83.7M 4.9G 2% /usr/share/busybox
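A quick write test inside the pod confirms the Gluster mount is writable:
/ # echo "hello gluster" > /usr/share/busybox/test.txt
/ # cat /usr/share/busybox/test.txt
hello gluster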
Set up a Gluster StorageClass so that PVs are provisioned automatically for PVCs
Create the Gluster StorageClass
The admin key must be stored in a Secret:
kubectl create secret generic heketi-secret --type="kubernetes.io/glusterfs" --from-literal=key='adminkey' --namespace=glusterfs
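To double-check that the stored key matches the --admin-key passed to gk-deploy:
kubectl get secret heketi-secret -n glusterfs -o jsonpath='{.data.key}' | base64 -d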
Create the StorageClass (the clusterid can be found in the output of heketi-cli --user admin --secret adminkey volume list)
cat glusterfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: slow        # StorageClass is cluster-scoped, so no namespace field
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://10.245.169.182:8080"
  clusterid: "d27bf12363831dfd94bccef4367d5ae1"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "glusterfs"    # must match the namespace where heketi-secret was created
  secretName: "heketi-secret"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"
[root@kube-master yaml]# kubectl apply -f glusterfs-storageclass.yaml
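Verify the StorageClass was created:
[root@kube-master yaml]# kubectl get storageclass slow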
Create a StatefulSet application
[root@kube-master yaml]# cat nginx.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    run: nginx      # must match the pod labels in the StatefulSet template below
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    run: nginx
  name: nginx
spec:
  replicas: 3
  serviceName: "nginx"
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "slow"
      resources:
        requests:
          storage: 1Gi
[root@kube-master yaml]# kubectl apply -f nginx.yaml
[root@kube-master yaml]# kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/pvc-229e21dc-9949-49ea-8f89-132402118682 1Gi RWO Delete Bound default/www-nginx-2 slow 9m21s
persistentvolume/pvc-4759e787-3362-478d-9f8d-367a7350a659 1Gi RWO Delete Bound default/www-nginx-0 slow 10m
persistentvolume/pvc-7eed3b09-09fc-4437-81bc-79f48d6e7654 1Gi RWO Delete Bound default/www-nginx-1 slow 9m57s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/www-nginx-0 Bound pvc-4759e787-3362-478d-9f8d-367a7350a659 1Gi RWO slow 10m
persistentvolumeclaim/www-nginx-1 Bound pvc-7eed3b09-09fc-4437-81bc-79f48d6e7654 1Gi RWO slow 10m
persistentvolumeclaim/www-nginx-2 Bound pvc-229e21dc-9949-49ea-8f89-132402118682 1Gi RWO slow 9m29s
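As a final check, write a file into one replica's volume and read it back after a pod restart; each pod keeps its own PVC, so the data should survive (a quick sketch):
kubectl exec nginx-0 -- sh -c 'echo "hello from nginx-0" > /usr/share/nginx/html/index.html'
kubectl delete pod nginx-0   # the StatefulSet recreates the pod and re-attaches the same PVC
kubectl exec nginx-0 -- cat /usr/share/nginx/html/index.html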