• 分布式minio


     http://slack.minio.org.cn/people/1

    https://blog.csdn.net/qq_42875105/article/details/106675711?utm_medium=distribute.pc_aggpage_search_result.none-task-blog-2~all~first_rank_v2~rank_v25-8-106675711.nonecase

    https://github.com/minio/operator

    hostpath形式

    1、node加标签

    kubectl  label nodes test-01 minio=true
    kubectl  label nodes test-02 minio=true
    kubectl  label nodes test-03 minio=true
    kubectl label nodes test-04 minio=true
     

    2、建立storageclass

    # StorageClass for statically provisioned local (hostPath) volumes.
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: local-storage
    # no-provisioner: no dynamic provisioning; the PVs are created by hand
    # in step 4 below.
    provisioner: kubernetes.io/no-provisioner
    reclaimPolicy: Retain
    # WaitForFirstConsumer delays PVC binding until a pod is scheduled, so
    # the volume is chosen on the node where the pod actually lands.
    volumeBindingMode: WaitForFirstConsumer

    3、minio.yaml

    # Headless service (clusterIP: None) that gives each StatefulSet pod a
    # stable DNS name: cluster-minio-N.cluster-minio.velero.svc.cluster.local
    apiVersion: v1
    kind: Service
    metadata:
      name: cluster-minio
      namespace: velero
      labels:
        app: cluster-minio
    spec:
      clusterIP: None
      ports:
        - port: 9000
          name: cluster-minio
      selector:
        app: cluster-minio
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: cluster-minio
      namespace: velero
    spec:
      selector:
        matchLabels:
          app: cluster-minio
      serviceName: cluster-minio
      # 4 replicas — the minimum server count for distributed MinIO.
      replicas: 4
      template:
        metadata:
          labels:
            app: cluster-minio
        spec:
          affinity:
            # Required anti-affinity: never co-locate two minio pods on the
            # same node (one local data directory per node).
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - "cluster-minio"
                topologyKey: kubernetes.io/hostname
          tolerations:
          # Bare "operator: Exists" tolerates every taint, so pods may also
          # be scheduled onto tainted (e.g. master) nodes.
          - operator: Exists
          containers:
          - name: cluster-minio
            env:
            # NOTE(review): MINIO_ACCESS_KEY / MINIO_SECRET_KEY are the
            # legacy credential variables; newer MinIO releases use
            # MINIO_ROOT_USER / MINIO_ROOT_PASSWORD — confirm against the
            # image version in use.
            - name: MINIO_ACCESS_KEY
              value: "cluster-minio"
            - name: MINIO_SECRET_KEY
              value: "cluster-minio123"
            image: 10.1.11.46/k8s-deploy/minio:latest
            args:
            - server
            # {0...3} is MinIO's ellipsis expansion: it enumerates the four
            # pod DNS names under the headless service above.
            - http://cluster-minio-{0...3}.cluster-minio.velero.svc.cluster.local/data
            ports:
            - containerPort: 9000
            # These volume mounts are persistent. Each pod in the
            # StatefulSet gets a volume mounted based on this field.
            volumeMounts:
            - name: data
              mountPath: /data
      # These are converted to volume claims by the controller
      # and mounted at the paths mentioned above.
      volumeClaimTemplates:
      - metadata:
          name: data
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 100Mi
          # Binds against the manually created local-storage PVs of step 4;
          # the 100Mi request fits within their 1Gi capacity.
          storageClassName: local-storage
          volumeMode: Filesystem

    4、建立pv

    # Four statically provisioned hostPath PVs, one per StatefulSet replica.
    # NOTE(review): all four PVs carry the same nodeAffinity (any node
    # labeled minio=true); the per-node spreading relies on the
    # StatefulSet's pod anti-affinity plus WaitForFirstConsumer binding —
    # verify this yields one PV per node in your cluster.
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-0
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      hostPath:
        # Data lives directly on the node's filesystem under this path.
        path: /minio-data-0
        type: ""
      nodeAffinity:
        required:
          nodeSelectorTerms:
          - matchExpressions:
            - key: minio
              operator: In
              values:
              - "true"
      persistentVolumeReclaimPolicy: Retain
      storageClassName: local-storage
      volumeMode: Filesystem
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-1
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      hostPath:
        path: /minio-data-1
        type: ""
      nodeAffinity:
        required:
          nodeSelectorTerms:
          - matchExpressions:
            - key: minio
              operator: In
              values:
              - "true"
      persistentVolumeReclaimPolicy: Retain
      storageClassName: local-storage
      volumeMode: Filesystem
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-2
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      hostPath:
        path: /minio-data-2
        type: ""
      nodeAffinity:
        required:
          nodeSelectorTerms:
          - matchExpressions:
            - key: minio
              operator: In
              values:
              - "true"
      persistentVolumeReclaimPolicy: Retain
      storageClassName: local-storage
      volumeMode: Filesystem
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-3
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      hostPath:
        path: /minio-data-3
        type: ""
      nodeAffinity:
        required:
          nodeSelectorTerms:
          - matchExpressions:
            - key: minio
              operator: In
              values:
              - "true"
      persistentVolumeReclaimPolicy: Retain
      storageClassName: local-storage
      volumeMode: Filesystem

    5、建立service

    # NodePort service exposing the cluster-minio pods outside the cluster.
    apiVersion: v1
    kind: Service
    metadata:
      name: minio-nodeport
      namespace: velero
      labels:
        app: cluster-minio
    spec:
      type: NodePort
      ports:
        - port: 9000
          name: minio
          targetPort: 9000
      selector:
        # Must match the pod label set by the cluster-minio StatefulSet.
        # The original selector "app: minio" matched no pods in the velero
        # namespace (pods are labeled "app: cluster-minio"), so the service
        # had no endpoints.
        app: cluster-minio

    nfs形式

    前提:新建好nfs相关内容

    nfs-client.yaml

    # StorageClass for the NFS-backed deployment.
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      annotations:
        NFSADDR: 192.168.92.147
        NFSPATH: /nfs/
        type: NFS
      # Renamed from "nfs1" to "nfs-client": the StatefulSet PVC template
      # and the statically created PVs below both reference
      # storageClassName "nfs-client", which previously pointed at a
      # StorageClass that did not exist.
      name: nfs-client
    parameters:
      archiveOnDelete: "false"
    provisioner: nfs-client-provisioner-nfs
    reclaimPolicy: Delete
    volumeBindingMode: Immediate

    1、sts

    # Headless service giving each StatefulSet pod a stable DNS name:
    # minio-N.minio.default.svc.cluster.local
    apiVersion: v1
    kind: Service
    metadata:
      name: minio
      labels:
        app: minio
    spec:
      clusterIP: None
      ports:
        - port: 9000
          name: minio
      selector:
        app: minio
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: minio
    spec:
      selector:
        matchLabels:
          app: minio
      serviceName: minio
      # 4 replicas — the minimum server count for distributed MinIO.
      replicas: 4
      template:
        metadata:
          labels:
            app: minio
        spec:
          tolerations:
          # Also allow scheduling onto master nodes.
          - key: "node-role.kubernetes.io/master"
            operator: "Exists"
            effect: "NoSchedule"
          containers:
          - name: minio
            env:
            # NOTE(review): legacy credential variables, matching the pinned
            # 2020 release below; newer MinIO images use MINIO_ROOT_USER /
            # MINIO_ROOT_PASSWORD.
            - name: MINIO_ACCESS_KEY
              value: "minio"
            - name: MINIO_SECRET_KEY
              value: "minio123"
            image: minio/minio:RELEASE.2020-06-01T17-28-03Z
            args:
            - server
            # {0...3} expands to the four pod DNS names; the "default" in
            # the address assumes this manifest is applied to the default
            # namespace (none is set above).
            - http://minio-{0...3}.minio.default.svc.cluster.local/data
            ports:
            - containerPort: 9000
            # These volume mounts are persistent. Each pod in the
            # StatefulSet gets a volume mounted based on this field.
            volumeMounts:
            - name: data
              mountPath: /data
      # These are converted to volume claims by the controller
      # and mounted at the paths mentioned above.
      volumeClaimTemplates:
      - metadata:
          name: data
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
          # Must match the storageClassName of the statically created NFS
          # PVs in step 2 for the claims to bind.
          storageClassName: nfs-client

    2、建立pv

    # Four statically provisioned NFS PVs, one per StatefulSet replica.
    # Fix: each PersistentVolume must be a separate YAML document — the
    # original omitted the "---" separators, collapsing all four into one
    # invalid document with duplicate keys.
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-0
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      nfs:
        # Directory created on the NFS server in step 3.
        path: /nfs/top/minio/0
        server: 192.168.*.*
      persistentVolumeReclaimPolicy: Delete
      storageClassName: nfs-client
      volumeMode: Filesystem
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-1
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      nfs:
        path: /nfs/top/minio/1
        server: 192.168.*.*
      persistentVolumeReclaimPolicy: Delete
      storageClassName: nfs-client
      volumeMode: Filesystem
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-2
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      nfs:
        path: /nfs/top/minio/2
        server: 192.168.*.*
      persistentVolumeReclaimPolicy: Delete
      storageClassName: nfs-client
      volumeMode: Filesystem
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: minio-pv-3
    spec:
      accessModes:
      - ReadWriteOnce
      capacity:
        storage: 1Gi
      nfs:
        path: /nfs/top/minio/3
        server: 192.168.*.*
      persistentVolumeReclaimPolicy: Delete
      storageClassName: nfs-client
      volumeMode: Filesystem

    3、登录nfs服务器新建目录

    mkdir -p /nfs/top/minio/{0,1,2,3}

    4、对外服务

    # NodePort service exposing minio on port 33007 of every node.
    # Fix: the original manifest was mangled — "nodePort" sat outside
    # "spec" at the wrong indentation, and "selector", "sessionAffinity"
    # and "type" were fused onto a single unparseable line.
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: minio
      name: minio-svc-nodeport
      namespace: default
    spec:
      ports:
      - name: minio
        port: 9000
        protocol: TCP
        targetPort: 9000
        # nodePort belongs on the port entry; 33007 must be inside the
        # cluster's configured NodePort range.
        nodePort: 33007
      selector:
        app: minio
      sessionAffinity: None
      type: NodePort

    5、访问

    [root@host-239 minio]# kubectl  get svc                           
    NAME                                         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)            AGE
    minio-svc-nodeport                           NodePort       10.*.*.*         <none>        9000:33007/TCP     59s

    使用nodeip加33007端口访问minio

    压测

    参考 https://www.cnblogs.com/yuhaohao/p/13099507.html

    https://blog.csdn.net/ff_gogo/article/details/85252189

    1、安装jdk环境和其他依赖

    yum install -y java-1.8.0-openjdk wget nmap-ncat

    2、下载

    wget https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c4/0.4.2.c4.zip

    3、解压,修改配置

    主要修改conf/s3-config-sample.xml的storage内容,添加minio 账号密码和地址

    每个workflow可以有多个workstage,每个workstage下可以有多个work;

    每种workstage对应一个工作类别,由work下的type字段标识,常见的有init(创建bucket)、prepare(预先写入object)、write(创建object写数据)、read(读数据)、cleanup(删除object)、dispose(删除bucket)。

    修改后的内容:

    <?xml version="1.0" encoding="UTF-8" ?>
    <!-- CosBench workload: mixed 80/20 read/write benchmark against the
         MinIO S3 endpoint configured in the storage element below. -->
    <workload name="s3-sample" description="sample benchmark for s3">
    
      <storage type="s3" config="accesskey=minio;secretkey=minio123;endpoint=http://10.1.11.*:30780" />
    
      <workflow>
    
        <!-- init: create buckets s3testqwer1 and s3testqwer2 -->
        <workstage name="init">
          <work type="init" workers="1" config="cprefix=s3testqwer;containers=r(1,2)" />
        </workstage>
    
        <!-- prepare: pre-populate objects 1..10 (128 KB each) per bucket -->
        <workstage name="prepare">
          <work type="prepare" workers="1" config="cprefix=s3testqwer;containers=r(1,2);objects=r(1,10);sizes=c(128)KB" />
        </workstage>
    
        <!-- main: 8 workers for 30 s — 80% reads of the prepared objects,
             20% writes of new objects 11..20 -->
        <workstage name="main">
          <work name="main" workers="8" runtime="30">
            <operation type="read" ratio="80" config="cprefix=s3testqwer;containers=u(1,2);objects=u(1,10)" />
            <operation type="write" ratio="20" config="cprefix=s3testqwer;containers=u(1,2);objects=u(11,20);sizes=c(128)KB" />
          </work>
        </workstage>
    
        <!-- cleanup: delete objects 1..20 -->
        <workstage name="cleanup">
          <work type="cleanup" workers="1" config="cprefix=s3testqwer;containers=r(1,2);objects=r(1,20)" />
        </workstage>
    
        <!-- dispose: delete the buckets -->
        <workstage name="dispose">
          <work type="dispose" workers="1" config="cprefix=s3testqwer;containers=r(1,2)" />
        </workstage>
    
      </workflow>
    
    </workload>
  • 相关阅读:
    uva 10269(floyd+Dijkstra)
    Codeforces Round #223 (Div. 2) 解题报告
    uva 11280(Dijkstra+递推)
    uva 10246(变形floyd)
    闲来无事:跳台阶
    opencv和javacv版本不一致
    javaCV:爱之初体验
    mevan引入容联云通讯jar
    复杂度函数的表示
    MySQL与mabits大小比较、日期比较示例
  • 原文地址:https://www.cnblogs.com/zphqq/p/13387891.html
Copyright © 2020-2023  润新知