• logstash收集日志并写入kafka再到es集群


    条件:
    有kafka环境

    图形架构:

    环境准备

    172.31.2.101 es1 + kibana
    172.31.2.102 es2
    172.31.2.103 es3
    172.31.2.104 logstash1
    172.31.2.105 logstash2
    172.31.2.41 zookeeper + kafka
    172.31.2.42 zookeeper + kafka
    172.31.2.43 zookeeper + kafka
    172.31.2.107 web1
    

    先启动zookeeper

    [root@mq1 ~]# /usr/local/zookeeper/bin/zkServer.sh restart
    [root@mq2 ~]# /usr/local/zookeeper/bin/zkServer.sh restart
    [root@mq3 ~]# /usr/local/zookeeper/bin/zkServer.sh restart
    

    启动kafka

    [root@mq1 ~]# /apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties
    
    [root@mq2 ~]# /apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties
    
    [root@mq3 ~]# /apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties
    

    查看端口

    [root@mq1 ~]# ss -tanl | grep 9092
    
    LISTEN  0        50          [::ffff:172.31.2.41]:9092                 *:*
    

    web服务器修改logstash配置,将日志写入kafka

    [root@es-web1 ~]# cat /etc/logstash/conf.d/kafka-nginx-es.conf
    
    input {
      file {
        path => "/var/log/nginx/access.log"
        start_position => "beginning"
        stat_interval => 3
        type => "nginx-accesslog"
        codec => "json"
     }
    
     file {
      path => "/apps/nginx/logs/error.log"
      start_position => "beginning"
      stat_interval => 3
      type => "nginx-errorlog"
      }
    }
    
    output {
      if [type] == "nginx-accesslog" {
        kafka {
          bootstrap_servers => "172.31.2.41:9092,172.31.2.42:9092,172.31.2.43:9092"
          topic_id => "long-linux21-accesslog"
          codec => "json"
      }}
    
      if [type] == "nginx-errorlog" {
        kafka {
          bootstrap_servers => "172.31.2.41:9092,172.31.2.42:9092,172.31.2.43:9092"
          topic_id => "long-linux21-errorlog"
          #codec => "json"
      }}
    }
    

    重启

    root@long:~# systemctl restart logstash
    

    在logstash服务器上配置从kafka消费日志并写入elasticsearch

    [root@logstash1 ~]# cat /etc/logstash/conf.d/kafka-to-es.conf
    
    input {
      kafka {
        bootstrap_servers => "172.31.2.41:9092,172.31.2.42:9092,172.31.2.43:9092"
        topics => ["long-linux21-accesslog"]
        codec => "json"
     }
    
      kafka {
        bootstrap_servers => "172.31.2.41:9092,172.31.2.42:9092,172.31.2.43:9092"
        topics => ["long-linux21-errorlog"]
        codec => "json"
      }
    }
    
    output {
      if [type] == "nginx-accesslog" {
        elasticsearch {
          hosts => ["172.31.2.101:9200"]
          index => "n19-long-kafka-nginx-accesslog-%{+YYYY.MM.dd}"
      }}
    
      if [type] == "nginx-errorlog" {
        elasticsearch {
          hosts => ["172.31.2.101:9200"]
          index => "n17-long-kafka-nginx-errorlog-%{+YYYY.MM.dd}"
      }}
    }
    

    测试

    [root@logstash1 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/kafka-to-es.conf -t
    

    重启

    root@long:~# systemctl restart logstash
    

    kafka工具

    写入kibana

  • 相关阅读:
    spring
    google-c-style
    process想停就停,真爽
    mytop
    Java 图片设置圆角(设置边框,旁白)
    当setTimeout遇到闭包
    FreeMarker辅助
    ImageIO.wtrie生成jpg图片质量损失方案:BufferedImage生成jpg图片文件流
    从BufferedImage到InputStream,实现绘图后进行下载(生成二维码图片并下载)
    使用Javascript 实现类
  • 原文地址:https://www.cnblogs.com/xuanlv-0413/p/15374798.html
Copyright © 2020-2023  润新知