• Configure Logstash to consume multiple Kafka topics and create a separate index for each


    Configure Filebeat with multiple topics

    filebeat.prospectors:

    - input_type: log
      encoding: GB2312
    #  fields_under_root: true
      fields:  ## add custom fields to every event
        serverip: 192.168.1.10
        logtopic: wap
      enabled: true
      paths:
        - /app/wap/logs/catalina.out
      multiline.pattern: '^\['  # merge Java stack-trace lines into one event
      multiline.negate: true
      multiline.match: after
      tail_files: false
    - input_type: log
      encoding: GB2312
    #  fields_under_root: true
      fields:  ## add custom fields to every event
        serverip: 192.168.1.10
        logtopic: api
      enabled: true
      paths:
        - /app/api/logs/catalina.out
      multiline.pattern: '^\['  # merge Java stack-trace lines into one event
      multiline.negate: true
      multiline.match: after
      tail_files: false
    #----------------------------- Kafka output --------------------------------
    output.kafka:
      enabled: true
      hosts: ["192.168.16.222:9092","192.168.16.237:9092","192.168.16.238:9092"]
      topic: 'elk-%{[fields.logtopic]}'  ## uses the logtopic value defined under fields above
      partition.hash:
        reachable_only: true
      compression: gzip
      max_message_bytes: 1000000
      required_acks: 1
    logging.to_files: true
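
    A quick sanity check before starting Filebeat: the config file and the Kafka output can be validated from the command line. This is a minimal sketch, assuming Filebeat 6.x or newer and a config path of /etc/filebeat/filebeat.yml (adjust the path to your installation):

    # check the configuration file for errors (config path is an assumption)
    $ filebeat test config -c /etc/filebeat/filebeat.yml
    # check that the brokers listed under output.kafka are reachable
    $ filebeat test output -c /etc/filebeat/filebeat.yml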
    
    

    Check that the logs are actually reaching Kafka

    $ bin/kafka-topics.sh --list --zookeeper kafka-01:2181,kafka-02:2181,kafka-03:2181
    elk-wap
    elk-api
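
    To confirm that log events themselves (not just the topics) are arriving, a few messages can be read straight off one of the topics. A minimal sketch, assuming Kafka 0.10+ so the console consumer accepts --bootstrap-server; the broker address is taken from the output.kafka section above:

    $ bin/kafka-console-consumer.sh --bootstrap-server 192.168.16.222:9092 \
        --topic elk-wap --from-beginning --max-messages 5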
    
    

    Configure the Logstash cluster

    input {
      kafka {
        bootstrap_servers => "kafka-01:9092,kafka-02:9092,kafka-03:9092"
        topics_pattern => "elk-.*"
        consumer_threads => 5
        decorate_events => true
        codec => "json"
        auto_offset_reset => "latest"
        group_id => "logstash1"  ## must be identical across all Logstash instances in the cluster
      }
    }
    filter {
      ruby {
        # shift the event timestamp to local time
        code => "event.timestamp.time.localtime"
      }
      mutate {
        remove_field => ["beat"]
      }
      grok {
        match => { "message" => "\[(?<time>\d+-\d+-\d+\s\d+:\d+:\d+)\] \[(?<level>\w+)\] (?<thread>[\w|-]+) (?<class>[\w|.]+) (?<lineNum>\d+):(?<msg>.+)" }
      }
    }
    output {
      elasticsearch {
        hosts => ["192.168.16.221:9200","192.168.16.251:9200","192.168.16.252:9200"]
        # index => "%{[fields][logtopic]}"  ## take the value straight from the event; the index name would then lose the elk- prefix
        index => "%{[@metadata][topic]}-%{+YYYY-MM-dd}"
      }
      stdout {
        codec => rubydebug
      }
    }
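
    Before starting the pipeline, the configuration can be validated without running it; Logstash checks the file and exits when --config.test_and_exit (short form -t) is passed. Assuming the config above is saved as test.conf:

    $ ./bin/logstash -f test.conf --config.test_and_exit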
    
    

    Logstash cluster configuration

    Multiple instances on one machine: reuse the same config file and only change the data path when starting each instance:
    ./bin/logstash -f test.conf --path.data=/usr/local/logdata/
    Multiple machines:
    it is enough for group_id in the Logstash config files to be identical
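
    Once events are flowing, the per-topic indices can be listed on any Elasticsearch node, for example via the _cat API (a quick check, using one of the hosts from the output section above):

    $ curl -XGET 'http://192.168.16.221:9200/_cat/indices/elk-*?v'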
    
    
  • Original article: https://www.cnblogs.com/sanduzxcvbnm/p/13132553.html