Getting Started with Elasticsearch (Part 1): Deploying ELK with docker-compose
It is recommended to configure a registry mirror for Docker.
Edit the Docker config file: vim /etc/docker/daemon.json
{
"registry-mirrors": ["http://hub-mirror.c.163.com"]
}
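After editing daemon.json, reload and restart the Docker daemon so the mirror takes effect. A quick check, assuming a systemd-based host:
systemctl daemon-reload                       # pick up the new daemon.json
systemctl restart docker                      # restart the Docker daemon
docker info | grep -A1 "Registry Mirrors"     # confirm the mirror is listed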
Installing Elasticsearch with Docker
Find the official Elasticsearch image
https://hub.docker.com/_/elasticsearch
docker pull
docker pull elasticsearch:7.7.0
network
docker network create esnetwork
docker run
docker run -d --name elasticsearch --net esnetwork -v /root/essearch/plugins:/usr/share/elasticsearch/plugins -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.7.0
Verify it works
Open http://yourip:9200/ in a browser
- The default Elasticsearch account is elastic and the default password is changeme
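Besides the browser, the node and the cluster health can be checked from the shell. A quick sketch, with yourip standing in for the host address as above:
curl http://yourip:9200/                            # node name, version and build info as JSON
curl 'http://yourip:9200/_cluster/health?pretty'    # status should be green or yellow on a single node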
Installing Kibana
Find the official Kibana image
https://hub.docker.com/_/kibana
docker pull
docker pull kibana:7.7.0
docker run
docker run -d --name kibana --net esnetwork -p 5601:5601 kibana:7.7.0
Verify it works
Open http://yourip:5601/app/kibana#/home in a browser
Find the Elasticsearch container's IP on the bridge network: docker network inspect esnetwork
- in this example the Elasticsearch container's address is 172.18.0.2
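Instead of reading through the full docker network inspect output, the container's address can be extracted directly with a Go-template filter; 172.18.0.2 is simply what this example network happened to assign:
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' elasticsearch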
Configuring Kibana
Enter the container with docker exec -it kibana bash and edit config/kibana.yml:
# Default Kibana configuration for docker target
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://172.18.0.2:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.username: "elastic"
elasticsearch.password: "changeme"
i18n.locale: "zh-CN"
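Kibana only reads kibana.yml at startup, so restart the container and then confirm it can reach Elasticsearch through its status API. A minimal check using the port mapped earlier:
docker restart kibana
curl http://yourip:5601/api/status    # the overall state should report "green"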
Clean up images that are no longer needed (optional): docker image prune -a -f
Installing Filebeat
- https://hub.docker.com/_/filebeat
- docker pull elastic/filebeat:7.7.0
- docker run --name filebeat --net esnetwork elastic/filebeat:7.7.0 setup -E setup.kibana.host=http://172.18.0.3:5601 -E output.elasticsearch.hosts=["http://172.18.0.2:9200"]
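The setup command above loads the Filebeat index template and sample dashboards into Elasticsearch and Kibana. One way to confirm it worked, using the published Elasticsearch port, is to look for the filebeat index template:
curl 'http://yourip:9200/_cat/templates/filebeat-*?v'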
Configure filebeat.docker.yml
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:172.18.0.2:9200}'
  username: '${ELASTICSEARCH_USERNAME:}'
  password: '${ELASTICSEARCH_PASSWORD:}'
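With the one-off setup done, Filebeat itself can be started with this file mounted as its configuration. The sketch below follows the official container docs: the Docker socket and the container log directory are mounted read-only so the docker autodiscover provider can see other containers, and the exact paths may need adjusting for your host:
docker rm filebeat    # remove the one-off setup container so the name can be reused
docker run -d \
  --name filebeat \
  --user=root \
  --net esnetwork \
  -v "$(pwd)/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro" \
  -v /var/lib/docker/containers:/var/lib/docker/containers:ro \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  elastic/filebeat:7.7.0 filebeat -e -strict.perms=false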
Deploying with docker-compose
- cd /usr/share/essearch
- Create and edit docker-compose.yml
version: "3.2"
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.7.0
    environment:
      - "discovery.type=single-node"
    volumes:
      - data01:/usr/share/elasticsearch/data
      - ./plugins:/usr/share/elasticsearch/plugins
    container_name: elasticsearch
    hostname: elasticsearch
    restart: always
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - elk
  kibana:
    image: docker.elastic.co/kibana/kibana:7.7.0
    environment:
      I18N_LOCALE: zh-CN
    container_name: kibana
    hostname: kibana
    restart: always
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch
  logstash:
    image: docker.elastic.co/logstash/logstash:7.7.0
    container_name: logstash
    hostname: logstash
    restart: always
    volumes:
      - type: bind
        source: "./logstash/logstash_stdout.conf"
        target: "/usr/share/logstash/pipeline/logstash.conf"
    ports:
      - 9600:9600
      - 5044:5044
    networks:
      - elk
    depends_on:
      - elasticsearch
  filebeat:
    image: docker.elastic.co/beats/filebeat:7.7.0
    container_name: filebeat
    hostname: filebeat
    restart: always
    volumes:
      - type: bind
        source: "./filebeat/filebeat.yml"
        target: "/usr/share/filebeat/filebeat.yml"
    networks:
      - elk
    depends_on:
      - logstash

volumes:
  data01:
    driver: local

networks:
  elk:
    driver: bridge
- filebeat.yml
filebeat.inputs:
  - type: log
    paths:
      - /*.log

output.logstash:
  # The Logstash hosts
  hosts: ["logstash:5044"]
- logstash_stdout.conf
input {
  beats {
    port => 5044
    host => "0.0.0.0"
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
  # stdout { codec => rubydebug }  # uncomment this line to debug if something goes wrong
}
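With the three files in place, the stack can be brought up from the compose directory. A sketch of the expected layout (directory names follow the bind-mount paths in docker-compose.yml above) and the usual lifecycle commands:
/usr/share/essearch
├── docker-compose.yml
├── plugins/
├── filebeat/
│   └── filebeat.yml
└── logstash/
    └── logstash_stdout.conf

docker-compose config -q          # validate the file; prints nothing when it is valid
docker-compose up -d              # start elasticsearch, kibana, logstash and filebeat
docker-compose ps                 # all four containers should be Up
docker-compose logs -f logstash   # follow Logstash output while testing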
Simplified docker-compose deployment
In actual project use, Elasticsearch plus Kibana turned out to be entirely sufficient, so to save server resources docker-compose.yml can be simplified:
docker-compose.yml
version: "3.2"
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    environment:
      - "xpack.security.enabled=true"
      - "discovery.type=single-node"
    volumes:
      - data01:/usr/share/elasticsearch/data
      - ./plugins:/usr/share/elasticsearch/plugins
    container_name: elasticsearch
    hostname: elasticsearch
    restart: always
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - elk
  kibana:
    image: docker.elastic.co/kibana/kibana:7.8.0
    environment:
      I18N_LOCALE: zh-CN
    container_name: kibana
    hostname: kibana
    restart: always
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

volumes:
  data01:
    driver: local

networks:
  elk:
    driver: bridge
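Because this file enables xpack.security.enabled, Elasticsearch now requires authentication and Kibana needs credentials to connect. One way to finish the setup, sketched under the assumption that the stack is already running: set the built-in users' passwords inside the Elasticsearch container, then pass the credentials to Kibana through its standard ELASTICSEARCH_USERNAME / ELASTICSEARCH_PASSWORD environment variables (here reusing the elastic account, matching the kibana.yml shown earlier in this post):
docker exec -it elasticsearch bin/elasticsearch-setup-passwords interactive   # prompts for elastic, kibana, etc.

kibana:
  environment:
    I18N_LOCALE: zh-CN
    ELASTICSEARCH_USERNAME: elastic
    ELASTICSEARCH_PASSWORD: "<the password set above>"   # placeholder, substitute your own

Then recreate the Kibana container with docker-compose up -d kibana so the new environment takes effect.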