ES集群
[root@master-node ~]# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
[root@master-node ~]# vim /etc/yum.repos.d/elastic.repo # 增加以下内容
[elasticsearch-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
[root@master-node ~]# yum install -y elasticsearch
如果使用官方的源下载实在太慢的话,也可以直接下载rpm包进行安装:
[root@master-node ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.rpm
[root@master-node ~]# rpm -ivh elasticsearch-6.0.0.rpm
配置es
elasticsearch的配置文件分布在以下两个目录中:
[root@master-node ~]# ll /etc/elasticsearch
总用量 16
-rw-rw---- 1 root elasticsearch 2869 2月 17 03:03 elasticsearch.yml
-rw-rw---- 1 root elasticsearch 2809 2月 17 03:03 jvm.options
-rw-rw---- 1 root elasticsearch 5091 2月 17 03:03 log4j2.properties
[root@master-node ~]# ll /etc/sysconfig/elasticsearch
-rw-rw---- 1 root elasticsearch 1613 2月 17 03:03 /etc/sysconfig/elasticsearch
[root@master-node ~]#
elasticsearch.yml 文件用于配置集群节点等相关信息,/etc/sysconfig/elasticsearch 文件则用于配置服务本身的相关参数,例如各配置文件的路径以及Java的相关路径配置等。
官方的配置文档:
https://www.elastic.co/guide/en/elasticsearch/reference/6.0/rpm.html
开始配置集群节点,在 192.168.77.128 上编辑配置文件:
[root@master-node ~]# vim /etc/elasticsearch/elasticsearch.yml # 增加或更改以下内容
cluster.name: master-node # 集群中的名称
node.name: master # 该节点名称
node.master: true # 意思是该节点为主节点
node.data: false # 表示这不是数据节点
network.host: 0.0.0.0 # 监听全部ip,在实际环境中应设置为一个安全的ip
http.port: 9200 # es服务的端口号
discovery.zen.ping.unicast.hosts: ["192.168.77.128", "192.168.77.130", "192.168.77.134"] # 配置自动发现
[root@master-node ~]#
然后将配置文件发送到另外两台机器上去:
[root@master-node ~]# scp /etc/elasticsearch/elasticsearch.yml data-node1:/tmp/
[root@master-node ~]# scp /etc/elasticsearch/elasticsearch.yml data-node2:/tmp/
到两台机器上去更改该文件,修改以下几处地方:
192.168.77.130:
[root@data-node1 ~]# vim /tmp/elasticsearch.yml
node.name: data-node1
node.master: false
node.data: true
[root@data-node1 ~]# cp /tmp/elasticsearch.yml /etc/elasticsearch/elasticsearch.yml
cp: overwrite ‘/etc/elasticsearch/elasticsearch.yml’? yes
[root@data-node1 ~]#
192.168.77.134:
[root@data-node2 ~]# vim /tmp/elasticsearch.yml
node.name: data-node2
node.master: false
node.data: true
[root@data-node2 ~]# cp /tmp/elasticsearch.yml /etc/elasticsearch/elasticsearch.yml
cp: overwrite ‘/etc/elasticsearch/elasticsearch.yml’? yes
[root@data-node2 ~]#
完成以上的配置之后,到主节点上,启动es服务:
systemctl start elasticsearch.service
主节点启动完成之后,再启动其他节点的es服务。
排错记录:
我这里启动主节点的时候没有启动成功,于是查看es的日志,但是却并没有生成,那就只能去看系统日志了:
[root@master-node ~]# ls /var/log/elasticsearch/
[root@master-node ~]# tail -n50 /var/log/messages
错误日志如下:
(此处原文为错误日志截图,图片未能显示:日志提示JDK路径配置有误)
如图,可以看到是JDK的路径配置得不对,没法在PATH里找到相应的目录。
于是查看JAVA_HOME环境变量的值指向哪里:
[root@master-node ~]# echo $JAVA_HOME
/usr/local/jdk1.8/
[root@master-node ~]# ls /usr/local/jdk1.8/
bin db javafx-src.zip lib man release THIRDPARTYLICENSEREADME-JAVAFX.txt
COPYRIGHT include jre LICENSE README.html src.zip THIRDPARTYLICENSEREADME.txt
[root@master-node ~]#
发现指向的路径并没有错,那就可能是忘记在profile里写export了,于是在profile的末尾加上了这一句:
export JAVA_HOME JAVA_BIN JRE_HOME PATH CLASSPATH
使用source命令重新加载了profile之后,重新启动es服务,但是依旧启动不起来,于是我发现我忽略了一条错误日志:
(此处原文为错误日志截图,图片未能显示:日志提示找不到java可执行文件)
这是无法在环境变量中找到java可执行文件,那就好办了,做一个软链接过去即可:
[root@master-node ~]# ln -s /usr/local/jdk1.8/bin/java /usr/bin/
再次启动es服务,这次就终于启动成功了:
[root@master-node ~]# systemctl restart elasticsearch.service
[root@master-node ~]# ps aux |grep elasticsearch
elastic+ 2655 9.4 31.8 3621592 1231396 ? Ssl 15:42 0:14 /bin/java -Xms1g -Xmx1g -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.io.tmpdir=/tmp/elasticsearch.4M9NarAc -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/lib/elasticsearch -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime -Xloggc:/var/log/elasticsearch/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=32 -XX:GCLogFileSize=64m -Des.path.home=/usr/share/elasticsearch -Des.path.conf=/etc/elasticsearch -cp /usr/share/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -p /var/run/elasticsearch/elasticsearch.pid --quiet
root 2735 0.0 0.0 112660 968 pts/0 S+ 15:44 0:00 grep --color=auto elasticsearch
[root@master-node ~]# netstat -lntp |grep java # es服务会监听两个端口
tcp6 0 0 :::9200 :::* LISTEN 2655/java
tcp6 0 0 :::9300 :::* LISTEN 2655/java
[root@master-node ~]#
9300端口是集群内节点间通信用的(transport),9200端口则是对外提供http协议的RESTful接口,供客户端查询和写入数据时使用。
主节点启动成功后,依次启动其他节点即可,我这里其他节点都是启动正常的。
curl查看es集群情况
集群的健康检查:
[root@master-node ~]# curl '192.168.77.128:9200/_cluster/health?pretty'
{
"cluster_name" : "master-node",
"status" : "green", # 为green则代表健康没问题,如果是yellow或者red则是集群有问题
"timed_out" : false, # 是否有超时
"number_of_nodes" : 3, # 集群中的节点数量
"number_of_data_nodes" : 2, # 集群中data节点的数量
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
[root@master-node ~]#
查看集群的详细信息:
[root@master-node ~]# curl '192.168.77.128:9200/_cluster/state?pretty'
{
"cluster_name" : "master-node",
"compressed_size_in_bytes" : 354,
"version" : 4,
"state_uuid" : "QkECzZHVQJOXB7K_9CgXYQ",
"master_node" : "SGez5oKUTa2eIijLp8MsLQ",
"blocks" : { },
"nodes" : {
"4sJURH6cTsykgLberJ6pVw" : {
"name" : "data-node1",
"ephemeral_id" : "t16_uw92T5ajJqp2HWodrg",
"transport_address" : "192.168.56.128:9300",
"attributes" : { }
},
"SGez5oKUTa2eIijLp8MsLQ" : {
"name" : "master",
"ephemeral_id" : "eJZX20tpSNyJCHgBIC4x4Q",
"transport_address" : "192.168.77.128:9300",
"attributes" : { }
},
"nZ4L5-KwSdirnluhJTGn7Q" : {
"name" : "data-node2",
"ephemeral_id" : "pQENcoUcRY6fTs7SamX2KQ",
"transport_address" : "192.168.77.134:9300",
"attributes" : { }
}
},
"metadata" : {
"cluster_uuid" : "jZPv-awrQDe163Nu3y2hHw",
"templates" : { },
"indices" : { },
"index-graveyard" : {
"tombstones" : [ ]
}
},
"routing_table" : {
"indices" : { }
},
"routing_nodes" : {
"unassigned" : [ ],
"nodes" : {
"nZ4L5-KwSdirnluhJTGn7Q" : [ ],
"4sJURH6cTsykgLberJ6pVw" : [ ]
}
},
"restore" : {
"snapshots" : [ ]
},
"snapshot_deletions" : {
"snapshot_deletions" : [ ]
},
"snapshots" : {
"snapshots" : [ ]
}
}
[root@master-node ~]#