• k8s-部署HBASE-2.3.6


    • 环境+版本
      k8s: v1.21.2
      hbase: v2.3.6

    Dockerfile

    FROM openjdk:8-jdk

    # Operator public key injected at build time so pods can be reached over SSH.
    # NOTE(review): a value baked in via ARG is visible in `docker history`;
    # consider mounting the key from a Kubernetes Secret at runtime instead.
    ARG SSH_PUB='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3nTRJ/aVb67l1xMaN36jmIbabU7Hiv/xpZ8bwLVvNO3Bj7kUzYTp7DIbPcHQg4d6EsPC6j91E8zW6CrV2fo2Ai8tDO/rCq9Se/64F3+8oEIiI6E/OfUZfXD1mPbG7M/kcA3VeQP6wxNPhWBbKRisqgUc6VTKhl+hK6LwRTZgeShxSNcey+HZst52wJxjQkNG+7CAEY5bbmBzAlHCSl4Z0RftYTHR3q8LcEg7YLNZasUogX68kBgRrb+jw1pRMNo7o7RI9xliDAGX+E4C3vVZL0IsccKgr90222axsADoEjC9O+Q6uwKjahemOVaau+9sHIwkelcOcCzW5SuAwkezv 805899926@qq.com'

    # sshd plus basic tooling. update+install combined in one layer (avoids the
    # stale apt-cache bug) and the package lists removed in the same layer.
    RUN apt-get update \
        && apt-get install -y --no-install-recommends \
            git \
            net-tools \
            openssh-server \
            vim \
        && rm -rf /var/lib/apt/lists/*

    # Comment out any pre-existing values for the settings we are about to set.
    # The \s/\w escapes are required for the character classes to match (they
    # were stripped to "s"/"w" in the original paste, making the sed a no-op).
    RUN sed -i -r 's/^\s*UseDNS\s+\w+/#/; s/^\s*PasswordAuthentication\s+\w+/#/; s/^\s*ClientAliveInterval\s+\w+/#/' /etc/ssh/sshd_config

    # Append our sshd settings one per line (a single multi-line echo depends on
    # fragile quoting across line continuations).
    RUN { \
            echo 'UseDNS no'; \
            echo 'PermitRootLogin yes'; \
            echo 'PasswordAuthentication yes'; \
            echo 'ClientAliveInterval 30'; \
        } >> /etc/ssh/sshd_config
    RUN cat /etc/ssh/sshd_config

    # Authorize the operator key, then generate a passphrase-less key pair so the
    # pods can SSH to each other non-interactively. `-N ''` is required: without
    # it ssh-keygen prompts for a passphrase and the non-interactive build hangs.
    # (`su root bash -c` dropped: the build already runs as root.)
    RUN mkdir -p /root/.ssh \
        && chmod 700 /root/.ssh \
        && echo "${SSH_PUB}" > /root/.ssh/authorized_keys \
        && chmod 644 /root/.ssh/authorized_keys \
        && ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa \
        && cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys

    # hadoop — HBase needs its native libraries. NOTE(review): pin by checksum
    # (sha256sum -c) for a reproducible build.
    ENV HADOOP_TGZ_URL=https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz
    ENV HADOOP_HOME=/opt/hadoop
    ENV PATH=$HADOOP_HOME/bin:$PATH
    # Download, unpack and delete the tarball in ONE layer so it never persists
    # in the image (the original's bare lines were missing `\` continuations).
    RUN set -ex \
        && mkdir -p $HADOOP_HOME \
        && wget -nv -O $HADOOP_HOME/src.tgz $HADOOP_TGZ_URL \
        && tar -xf $HADOOP_HOME/src.tgz --strip-components=1 -C $HADOOP_HOME \
        && rm $HADOOP_HOME/src.tgz \
        && chown -R root:root $HADOOP_HOME

    # Keep only hadoop's lib/ directory; everything else is dead weight here.
    # find -exec needs an escaped `\;` (or `+`) terminator — a bare `;` ends the
    # shell command instead and the find fails.
    RUN find $HADOOP_HOME/ -maxdepth 1 -not -name 'lib' ! -path $HADOOP_HOME/ -exec rm -rf {} +

    # hbase
    ENV HBASE_TGZ_URL=https://mirrors.tuna.tsinghua.edu.cn/apache/hbase/stable/hbase-2.3.6-bin.tar.gz
    ENV HBASE_HOME=/opt/hbase
    ENV PATH=$HBASE_HOME/bin:$PATH
    RUN set -ex \
        && mkdir -p $HBASE_HOME \
        && wget -nv -O $HBASE_HOME/src.tgz $HBASE_TGZ_URL \
        && tar -xf $HBASE_HOME/src.tgz --strip-components=1 -C $HBASE_HOME \
        && rm $HBASE_HOME/src.tgz \
        && chown -R root:root $HBASE_HOME \
        && rm -rf $HBASE_HOME/docs/

    COPY docker-entrypoint.sh /
    # Documentation only; 9870/9000 are HDFS ports — presumably copied from the
    # companion Hadoop image. TODO confirm whether 16000/16010/16020/16030
    # (HBase master/regionserver) were intended here.
    EXPOSE 22 9870 9000
    ENTRYPOINT ["/docker-entrypoint.sh"]
    

    docker-entrypoint.sh

    #!/bin/bash
    # Container entrypoint: start sshd, then run the HBase daemon selected by
    # the HBASE_NODE_TYPE environment variable ("master" or "region").
    set -e

    service ssh start

    # The \033 ANSI escapes were mangled to bare "33[32m" in the original paste.
    # `case` on a quoted expansion is safe when HBASE_NODE_TYPE is unset (the
    # original unquoted `[ $HBASE_NODE_TYPE = ... ]` errored out under set -e).
    # `exec` makes the daemon PID 1 so it receives SIGTERM on pod shutdown.
    case "${HBASE_NODE_TYPE}" in
      master)
        echo -e "\033[32m hbase master start \033[0m"
        exec "$HBASE_HOME/bin/hbase" master start
        ;;
      region)
        echo -e "\033[32m hbase region start \033[0m"
        exec "$HBASE_HOME/bin/hbase" regionserver start
        ;;
    esac
    

    pod template

    # NodePort Service exposing the HBase master RPC (16000) and web UI (16010).
    apiVersion: v1
    kind: Service
    metadata:
      name: hbase-master
      namespace: big-data
    spec:
      selector:
        app: hbase-master
      type: NodePort
      ports:
        # NOTE(review): nodePorts 16000/16010 are below the default NodePort
        # range (30000-32767); this only applies cleanly if the apiserver's
        # --service-node-port-range was widened — confirm on the target cluster.
        - name: "16000"
          port: 16000
          targetPort: 16000
          nodePort: 16000
        - name: "16010"
          port: 16010
          targetPort: 16010
          nodePort: 16010
    ---
    # Shared HBase configuration, mounted into both the master Deployment and
    # the regionserver StatefulSet via subPath mounts under /opt/hbase/conf/.
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: hbase
      namespace: big-data
      labels:
        app: hbase
    data:
      # Sourced by the HBase launch scripts: JVM location, heap size, and
      # Hadoop native-lib path (matches HADOOP_HOME=/opt/hadoop in the image).
      hbase-env.sh: |
        export JAVA_HOME=/usr/local/openjdk-8
        export HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP="true"
        export LD_LIBRARY_PATH=${HADOOP_HOME}/lib/native:$LD_LIBRARY_PATH
        export HBASE_HEAPSIZE=1G
      # Distributed-mode config: data root in HDFS on hadoop-master.big-data.
      # NOTE(review): the ZooKeeper quorum points at zk-cs.default.svc — the
      # "default" namespace, not big-data; verify that service exists.
      hbase-site.xml: |
        <?xml version="1.0"?>
        <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
        <configuration>
          <property>
            <name>hbase.cluster.distributed</name>
            <value>true</value>
          </property>
          <property>
            <name>hbase.rootdir</name>
            <value>hdfs://hadoop-master.big-data:9000/hbase</value>
          </property>
          <property>
            <name>hbase.tmp.dir</name>
            <value>/opt/hbase/tmp/</value>
          </property>
          <property>
            <name>hbase.zookeeper.quorum</name>
            <value>zk-cs.default.svc</value>
          </property>
          <property>
              <name>hbase.regionserver.restart.on.zk.expire</name>
              <value>true</value>
          </property>
          <property>
            <name>hbase.unsafe.stream.capability.enforce</name>
            <value>false</value>
          </property>
        </configuration>
    ---
    # Single HBase master. Recreate strategy: the old pod must be gone before a
    # new one starts (two masters racing for the same identity is undesirable).
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: hbase-master
      namespace: big-data
    spec:
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: hbase-master
      template:
        metadata:
          labels:
            app: hbase-master
        spec:
          # hostname + subdomain give the pod the stable DNS name
          # master.hbase-master.big-data.svc, which other pods can resolve.
          hostname: master
          subdomain: hbase-master
          volumes:
            - name: hbase-env
              configMap:
                name: hbase
                items:
                  - key: hbase-env.sh
                    path: hbase-env.sh
            - name: hbase-site
              configMap:
                name: hbase
                items:
                  - key: hbase-site.xml
                    path: hbase-site.xml
            - name: hbase-data
              persistentVolumeClaim:
                claimName: data-hbase-master
          containers:
            - name: hbase-master
              image: registry:5000/hbase
              imagePullPolicy: Always
              ports:
                - containerPort: 16000
                - containerPort: 16010
              volumeMounts:
                # Single-file overlays of the image's default conf directory.
                - name: hbase-env
                  mountPath: /opt/hbase/conf/hbase-env.sh
                  subPath: hbase-env.sh
                - name: hbase-site
                  mountPath: /opt/hbase/conf/hbase-site.xml
                  subPath: hbase-site.xml
                # Only logs are persisted here; table data lives in HDFS
                # (see hbase.rootdir in the ConfigMap).
                - name: hbase-data
                  mountPath: /opt/hbase/logs/
                  subPath: logs
              env:
                # Consumed by docker-entrypoint.sh to pick the daemon to run.
                - name: HBASE_NODE_TYPE
                  value: master
    ---
    # Claim backing the master's log mount, served by the NFS provisioner.
    # NOTE(review): 256Gi for logs alone looks generous — confirm the sizing.
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: data-hbase-master
      namespace: big-data
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 256Gi
      storageClassName: "managed-nfs-storage"
    ---
    # In-cluster Service for the regionservers (RPC 16020, info UI 16030).
    # NOTE(review): it backs a StatefulSet's serviceName but is not headless
    # (no clusterIP: None), so per-pod DNS records are not created — confirm
    # whether stable pod DNS is needed.
    apiVersion: v1
    kind: Service
    metadata:
      name: hbase-region
      namespace: big-data
    spec:
      selector:
        app: hbase-region
      ports:
        - name: "rpc"
          port: 16020
        - name: "http"
          port: 16030
    ---
    # Two regionservers with per-replica PVCs via volumeClaimTemplates.
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: hbase-region
      namespace: big-data
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: hbase-region
      serviceName: hbase-region
      template:
        metadata:
          labels:
            app: hbase-region
        spec:
          volumes:
            - name: hbase-env
              configMap:
                name: hbase
                items:
                  - key: hbase-env.sh
                    path: hbase-env.sh
            - name: hbase-site
              configMap:
                name: hbase
                items:
                  - key: hbase-site.xml
                    path: hbase-site.xml
          containers:
            - name: hbase-region
              image: registry:5000/hbase
              imagePullPolicy: Always
              ports:
                - containerPort: 16020
                - containerPort: 16030
              volumeMounts:
                # Same single-file config overlays as the master Deployment.
                - name: hbase-env
                  mountPath: /opt/hbase/conf/hbase-env.sh
                  subPath: hbase-env.sh
                - name: hbase-site
                  mountPath: /opt/hbase/conf/hbase-site.xml
                  subPath: hbase-site.xml
                # NOTE(review): subPath here is "hbase/logs" while the master
                # uses "logs" — harmless (different PVCs) but inconsistent.
                - name: data
                  mountPath: /opt/hbase/logs/
                  subPath: hbase/logs
              env:
                # Consumed by docker-entrypoint.sh: runs `hbase regionserver`.
                - name: HBASE_NODE_TYPE
                  value: region
      volumeClaimTemplates:
        - metadata:
            name: data
          spec:
            accessModes:
              - ReadWriteMany
            resources:
              requests:
                storage: 256Gi
            storageClassName: "managed-nfs-storage"
    
    
  • 相关阅读:
    [BZOJ2969] 矩形粉刷
    数字 (number)
    字符串(String)
    小HY的四元组
    最大公约数(Max Gcd)
    [洛谷P2102] 地砖铺设
    Python OS模块(内置模块)
    json解析神器--jsonpath
    kafka 优势+应用场景
    Python之异常处理
  • 原文地址:https://www.cnblogs.com/chenzhaoyu/p/15141700.html
Copyright © 2020-2023  润新知