• 部署带有验证的es集群及创建快照


    1️⃣ 环境准备
    ① 关闭防火墙、selinux
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    setenforce 0
    systemctl stop firewalld
    systemctl disable firewalld

    ② 修改系统最大打开文件数和进程数
    cat <<EOF >> /etc/security/limits.conf
    * soft nofile 65536
    * hard nofile 65536
    * soft nproc 20480
    * hard nproc 40960
    EOF
    echo vm.max_map_count=655360 >> /etc/sysctl.conf
    sysctl -p

    ③ 配置主机名及互信
    ④ 配置yum源
    yum -y install wget vim git wget unzip telnet lsof
    cd /etc/yum.repos.d/
    mkdir backup
    mv *.repo backup
    # 阿里云yum源

    wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    yum clean all
    yum makecache

    # epel源
    yum -y install epel-release
    yum clean all
    yum makecache
    # elk源
    cat <<EOF > /etc/yum.repos.d/elk.repo
    [elk]
    name=elk
    baseurl=https://mirrors.tuna.tsinghua.edu.cn/elasticstack/yum/elastic-7.x/
    enabled=1
    gpgcheck=0
    EOF

    ⑤ 源码安装java # 此步骤可忽略,高版本的es集成了java环境
    mkdir -p /data/apps/
    tar -xf jdk-8u11-linux-x64.tar.gz
    mv jdk1.8.0_11/ /data/apps/jdk   # 与下方 JAVA_HOME=/data/apps/jdk 保持一致
    cat <<'EOF' > /etc/profile.d/jdk.sh   # 'EOF' quoted so $JAVA_HOME/$PATH are written literally, not expanded now
    JAVA_HOME=/data/apps/jdk
    CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
    PATH=$JAVA_HOME/bin:$PATH
    export JAVA_HOME CLASSPATH PATH
    EOF
    source /etc/profile

    2️⃣ 安装elasticsearch集群

    ① 下载
    wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.3.0-linux-x86_64.tar.gz
    tar -xf elasticsearch-7.3.0-linux-x86_64.tar.gz
    mv elasticsearch-7.3.0 /data/apps/elasticsearch
    cd /data/apps
    useradd es
    chown -R es.es elasticsearch
    su - es
    mkdir -pv /home/es/data/{es1,es2} /home/es/logs/es   # 与 path.data/path.logs 配置保持一致

    ② 配置 config/elasticsearch.yml

    cluster.name: es-prd # 集群名称
    node.name: es-node1 # 节点名称
    #node.master: true
    #node.data: true
    path.data: /home/es/data/es1,/home/es/data/es2 # es索引库的数据存储目录
    path.logs: /home/es/logs/es # es进程启动后,对应的日志信息存放目录
    path.repo: /data/es_bk # 备份文件
    network.host: 0.0.0.0
    network.publish_host: 192.168.1.19
    http.port: 9200
    transport.tcp.port: 9300 # 节点间交互的tcp端口,默认9300
    
    #cluster.initial_master_nodes: ["es-node1"]
    discovery.zen.minimum_master_nodes: 2 # 防脑裂,集群中至少有2台节点可用,否则集群就瘫痪。计算公式: 节点数/2+1
    #discovery.zen.ping.unicast.hosts: ["es-node1","es-node2","es-node3"] #

     discovery.seed_hosts: ["192.168.1.19", "192.168.1.20", "192.168.1.22"]
     cluster.initial_master_nodes: ["es-node1", "es-node2","es-node3"]

    
    discovery.zen.ping_timeout: 60s # 心跳超时时间 
    discovery.zen.fd.ping_interval: 120s     # 节点检测时间 
    discovery.zen.fd.ping_timeout: 120s     #ping 超时时间 
    discovery.zen.fd.ping_retries: 3     # 心跳重试次数
    #gateway.recover_after_nodes: 4
    #gateway.recover_after_time: 10m
    #gateway.expected_nodes: 5
    bootstrap.system_call_filter: false
    
    # 允许跨域请求
    http.cors.enabled: true
    http.cors.allow-origin: "*"
    http.cors.allow-credentials: true
    
    xpack.security.enabled: false # 先关闭auth认证;生成TLS证书后再改为true开启(见下文“配置tls和身份验证”)
    xpack.security.transport.ssl.enabled: true
    xpack.security.transport.ssl.verification_mode: certificate
    xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
    xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
    http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type

    *** 调整es堆内存 建议调整为物理内存的50% 但不要超过32G
    vim jvm.options
    -Xms30g
    -Xmx30g

    *** es 参数调优
    index.merge.scheduler.max_thread_count:1 # 索引 merge 最大线程数
    indices.memory.index_buffer_size:30% # 内存
    index.translog.durability:async # 这个可以异步写硬盘,增大写的速度
    index.translog.sync_interval:120s #translog 间隔时间
    discovery.zen.ping_timeout:120s # 心跳超时时间
    discovery.zen.fd.ping_interval:120s # 节点检测时间
    discovery.zen.fd.ping_timeout:120s #ping 超时时间
    discovery.zen.fd.ping_retries:6 # 心跳重试次数
    thread_pool.bulk.size:20 # 写入线程个数 由于我们查询线程都是在代码里设定好的,我这里只调节了写入的线程数
    thread_pool.bulk.queue_size:1000 # 写入线程队列大小
    index.refresh_interval:300s #index 刷新间隔
    bootstrap.memory_lock: true

    ③ 启动
    #Running as a daemon
    ./bin/elasticsearch -d -p pid_file
    # shut down Elasticsearch
    pkill -F pid_file
    # Checking that Elasticsearch is running
    curl -XGET 'http://127.0.0.1:9200'


    **** 使用systemd 管理es集群
    vim /usr/lib/systemd/system/es.service
    [Unit]
    Description=Elasticsearch
    Documentation=http://www.elastic.co
    Wants=network-online.target
    After=network-online.target

    [Service]
    Restart=always
    Type=simple
    PrivateTmp=true
    Environment=ES_HOME=/data/apps/elasticsearch
    Environment=ES_PATH_CONF=/data/apps/elasticsearch/config
    Environment=PID_DIR=/data/apps/elasticsearch
    Environment=ES_SD_NOTIFY=true
    #EnvironmentFile=/etc/sysconfig/elasticsearch

    WorkingDirectory=/data/apps/elasticsearch

    User=es
    Group=es

    ExecStart=/data/apps/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet

    # StandardOutput is configured to redirect to journalctl since
    # some error messages may be logged in standard output before
    # elasticsearch logging system is initialized. Elasticsearch
    # stores its logs in /var/log/elasticsearch and does not use
    # journalctl by default. If you also want to enable journalctl
    # logging, you can simply remove the "quiet" option from ExecStart.
    StandardOutput=journal
    StandardError=inherit

    # Specifies the maximum file descriptor number that can be opened by this process
    LimitNOFILE=65535

    # Specifies the maximum number of processes
    LimitNPROC=20480

    LimitMEMLOCK=infinity

    # Specifies the maximum size of virtual memory
    LimitAS=infinity

    # Specifies the maximum file size
    LimitFSIZE=infinity

    # Disable timeout logic and wait until process is stopped
    TimeoutStopSec=0

    # SIGTERM signal is used to stop the Java process
    KillSignal=SIGTERM

    # Send the signal only to the JVM rather than its control group
    KillMode=process

    # Java process is never killed
    SendSIGKILL=no

    # When a JVM receives a SIGTERM signal it exits with code 143
    SuccessExitStatus=143

    [Install]
    WantedBy=multi-user.target

    # Built for ${project.name}-${project.version} (${project.name})

    systemctl daemon-reload
    systemctl enable es
    systemctl start es

    # 配置tls和身份验证

    1️⃣ 创建证书文件(master上执行)
    ./bin/elasticsearch-certutil ca # 一直回车
    ./bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 # 一直回车

    mkdir config/certs
    mv elastic-*.p12 config/certs/
    chown -R es.es config/certs/

    2️⃣ 修改配置并重启
    cat >> config/elasticsearch.yml <<EOF
    xpack.security.enabled: true
    xpack.security.transport.ssl.enabled: true
    xpack.security.transport.ssl.verification_mode: certificate
    xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
    xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
    EOF

    3️⃣ 生成客户端证书

    bin/elasticsearch-keystore create
    bin/elasticsearch-certutil cert --ca config/certs/elastic-stack-ca.p12 \
      -name "CN=esuser,OU=dev,DC=weqhealth,DC=com"
    # 密码提示直接回车;输出文件名输入 client.p12 后回车

    mv client.p12 config/certs/
    cd config/certs/
    openssl pkcs12 -in client.p12 -nocerts -nodes > client-key.pem
    openssl pkcs12 -in client.p12 -clcerts -nokeys > client.crt
    openssl pkcs12 -in client.p12 -cacerts -nokeys -chain > client-ca.crt

    chown es.es client*

    scp * es-node2:/data/apps/elasticsearch/config/certs/

    scp * es-node3:/data/apps/elasticsearch/config/certs/

    4️⃣ 设置默认密码
    bin/elasticsearch-setup-passwords interactive # auto

    Changed password for user apm_system
    PASSWORD apm_system = ktfrkXe3aA2qz1UgLoBR

    Changed password for user kibana
    PASSWORD kibana = HQuZIBunJgTRuAnXdXga

    Changed password for user logstash_system
    PASSWORD logstash_system = BclvBlUd378SSBlJ832x

    Changed password for user beats_system
    PASSWORD beats_system = gYiAWtiHdMBMsY8Nj86L

    Changed password for user remote_monitoring_user
    PASSWORD remote_monitoring_user = jaF3jzw08GKFuVBh78Ri

    Changed password for user elastic
    PASSWORD elastic = IIti4qJDEi6X2LX2iNmd

    # 验证

    curl --user username:password  -XGET 'ip:9200/_cat/health?v&pretty'

    # 安全重启es

    chown -R es.es /data/apps/elasticsearch

    ① 禁用分片规则
    PUT _cluster/settings
    {
    "persistent": {
    "cluster.routing.allocation.enable": "primaries"
    }
    }
    ② 重启
    ③ 开启分片
    PUT _cluster/settings
    {
    "persistent": {
    "cluster.routing.allocation.enable": null
    }
    }


    # 查看集群状况
    http://192.168.27.157:9200/_cat/nodes?v
    http://192.168.27.157:9100/?auth_user=elastic&auth_password=IIti4qJDEi6X2LX2iNmd


    ====> ES-Head Plugin 方便对ES进行各种操作的客户端工具
    https://github.com/mobz/elasticsearch-head
    *** 插件不能安装在es的plugin目录下
    git clone https://github.com/mobz/elasticsearch-head.git   # GitHub 已停用 git:// 协议,需使用 https
    cd elasticsearch-head
    yum -y install nodejs npm
    npm init -f # 解决 npm WARN enoent ENOENT: no such file or directory, open '/soft/elasticsearch/plugins/package.json'
    npm install -g grunt-cli
    npm install grunt --save
    npm install grunt-contrib-clean
    npm install grunt-contrib-concat
    npm install grunt-contrib-watch
    npm install grunt-contrib-connect
    npm install grunt-contrib-copy
    npm install phantomjs-prebuilt@2.1.14 --ignore-scripts
    npm install grunt-contrib-jasmine

    # elasticsearch-head 目录下的 Gruntfile.js 文件,在 options 属性内增加 hostname,设置为 0.0.0.0
    connect: {
    server: {
    options: {
    hostname: '0.0.0.0',
    port: 9100,
    base: '.',
    keepalive: true
    }
    }
    }
    # 修改elasticsearch-head/_site/app.js
    this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://node-1:9200";

    # 启动elasticsearch-head
    nohup grunt server > /dev/null 2>&1 &


    ====> kibana Plugin 读取es集群中索引库的type信息,并使用可视化的方式呈现
    wget https://artifacts.elastic.co/downloads/kibana/kibana-7.3.2-linux-x86_64.tar.gz
    shasum -a 512 kibana-7.3.2-linux-x86_64.tar.gz
    tar -xzf kibana-7.3.2-linux-x86_64.tar.gz
    mv kibana-7.3.2-linux-x86_64 kibana
    mv kibana /data/apps/es-plugin
    cd /data/apps/es-plugin/kibana

    vim config/kibana.yml
    i18n.locale: "zh-CN"
    server.host: "192.168.27.157" //监听IP地址,建议内网ip
    #elasticsearch.hosts: ["http://192.168.27.157:9200"]   //elasticsearch连接kibana的URL,可任选一个节点
    elasticsearch.username: "kibana"
    elasticsearch.password: "HQuZIBunJgTRuAnXdXga"

    server.port: 5601 //监听端口

    useradd -M kibana
    chown -R kibana.kibana kibana
    su - kibana
    nohup ./bin/kibana &

     # systemd管理kibana

    [Unit]
    Description=kibana

    [Service]
    Type=simple
    User=kibana
    Group=kibana
    ExecStart=/data/apps/es-plugin/kibana/bin/kibana
    Restart=always

    [Install]
    WantedBy=multi-user.target

    es性能优化
    提升段合并速度(固态盘)

    PUT /_cluster/settings
    {
    "persistent" : {
    "indices.store.throttle.max_bytes_per_sec" : "100mb"
    }
    }


    # 部署nfs共享存储
    服务端:
    yum -y install nfs-utils
    systemctl enable rpcbind
    systemctl enable nfs
    systemctl start rpcbind
    systemctl start nfs

    #firewall-cmd --zone=public --permanent --add-service={rpc-bind,mountd,nfs}
    #firewall-cmd --reload

    echo '/path/ 192.168.1.0/24(rw,sync,root_squash,no_all_squash)' > /etc/exports
    systemctl restart nfs

    # 查看
    showmount -e localhost

    客户端
    yum -y install nfs-utils
    systemctl enable rpcbind
    systemctl restart rpcbind
    查看服务端共享目录
    showmount -e 192.168.27.158
    # 挂载
    mount -t nfs 192.168.27.158:/path /path
    # 查看
    mount

    # 自动挂载
    vim /etc/fstab
    192.168.27.158:/path /path nfs defaults 0 0
    systemctl daemon-reload
    mount -a   # 验证fstab条目可正常挂载

    # 创建仓库
    curl -XPUT -u elastic:IIti4qJDEi6X2LX2iNmd http://192.168.27.157:9200/_snapshot/my_backup -H 'Content-Type: application/json' -d '{"type": "fs", "settings": {"location": "/data/es_bk", "compress": true}}'
    # 创建快照
    curl -XPUT -u elastic:IIti4qJDEi6X2LX2iNmd http://192.168.27.157:9200/_snapshot/my_backup/snapshot_1?wait_for_completion=true
    # 恢复快照
    curl -XPOST http://127.0.0.1:9200/_snapshot/my_backup/snapshot_1/_restore

    # 查看仓库
    curl -XGET -u elastic:IIti4qJDEi6X2LX2iNmd "http://192.168.27.157:9200/_snapshot/my_backup?pretty"
    # 查看快照
    curl -XGET -u elastic:IIti4qJDEi6X2LX2iNmd "http://192.168.27.157:9200/_snapshot/my_backup/_all?pretty"
    # 删除快照
    curl -XDELETE -u elastic:IIti4qJDEi6X2LX2iNmd "http://192.168.27.157:9200/_snapshot/my_backup/snapshot_1"

    #  设置密码如果报错,可先关掉密码验证,然后删掉对应的文件 ,然后在开启密码验证

    curl -XGET http://192.168.1.19:9200/_cat/indices
    
    curl -XDELETE http://192.168.1.19:9200/.security-7
  • 相关阅读:
    linux下文件的复制、移动与删除
    Hbase万亿级存储性能优化总结-配置
    hbase 读写和优化
    hive数据倾斜定位及处理
    flink初识及安装flink standalone集群
    【Linux】用less查看日志文件
    sqoop的详细使用及原理
    HBase删除数据的原理
    hbase数据加盐(Salting)存储与协处理器查询数据的方法
    Hbase内存磁盘大致关系
  • 原文地址:https://www.cnblogs.com/ray-mmss/p/12127383.html
Copyright © 2020-2023  润新知