• Big Data Technology: Flume Configuration Examples


    This post collects four Flume agent configurations: a netcat source logged to the console (test1), a spooling-directory source forwarded over Avro (test2), an Avro source written to HDFS (test3), and an exec source that tails an Nginx access log into HDFS (getnginxlog).


    [root@bigdatacloud conf]# cat test1
    a1.sources = r1
    a1.sinks = k1
    a1.channels = c1

    # Describe/configure the source
    a1.sources.r1.type = netcat
    a1.sources.r1.bind = 0.0.0.0
    a1.sources.r1.port = 44444

    # Describe the sink
    a1.sinks.k1.type = logger

    # Use a channel which buffers events in memory
    a1.channels.c1.type = memory
    a1.channels.c1.capacity = 1000
    a1.channels.c1.transactionCapacity = 100

    # Bind the source and sink to the channel
    a1.sources.r1.channels = c1
    a1.sinks.k1.channel = c1
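
    test1 listens with a netcat source on port 44444 and writes each received line to the Flume log through the logger sink, buffered by a memory channel. A minimal sketch of starting and exercising this agent, assuming the config file sits in the current conf directory and flume-ng is on the PATH:

    [root@bigdatacloud conf]# flume-ng agent --conf . --conf-file test1 --name a1 -Dflume.root.logger=INFO,console
    # In another terminal, send a test event to the netcat source
    [root@bigdatacloud conf]# echo "hello flume" | nc localhost 44444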


    ====================

    [root@bigdatacloud conf]# cat test2
    a1.sources=r1
    a1.sinks=k1
    a1.channels=c1

    # Describe/configure the source
    a1.sources.r1.type=spooldir
    a1.sources.r1.spoolDir=/opt/sqooldir

    # Describe the sink
    a1.sinks.k1.type=avro
    a1.sinks.k1.hostname=bigdatastorm
    a1.sinks.k1.port=44444

    # Use a channel which buffers events in memory
    a1.channels.c1.type=memory
    a1.channels.c1.capacity=1000
    a1.channels.c1.transactionCapacity=100

    # Bind the source and sink to the channel
    a1.sources.r1.channels=c1
    a1.sinks.k1.channel=c1
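
    test2 watches the spooling directory /opt/sqooldir and forwards completed files as Avro events to host bigdatastorm on port 44444, so it is meant to pair with an Avro source (such as test3) on the receiving side. A sketch of how it might be started and fed; the sample file name is only illustrative, and the spool directory is assumed to already exist:

    [root@bigdatacloud conf]# flume-ng agent --conf . --conf-file test2 --name a1 -Dflume.root.logger=INFO,console
    # Drop a finished file into the spool directory; Flume renames it to *.COMPLETED once ingested
    [root@bigdatacloud conf]# cp /tmp/sample.log /opt/sqooldir/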

    =========================

    [root@bigdatacloud conf]# cat test3
    a1.sources=r1
    a1.sinks=k1
    a1.channels=c1

    # Describe/configure the source
    a1.sources.r1.type=avro
    a1.sources.r1.bind=0.0.0.0
    a1.sources.r1.port=44444

    # Describe the sink
    a1.sinks.k1.type=hdfs
    a1.sinks.k1.hdfs.path=hdfs://mycluster/flume/data/%y-%m-%d
    a1.sinks.k1.hdfs.rollInterval=0
    a1.sinks.k1.hdfs.rollCount=0
    a1.sinks.k1.hdfs.rollSize=10240000
    a1.sinks.k1.hdfs.fileType=DataStream
    a1.sinks.k1.hdfs.idleTimeout=5
    a1.sinks.k1.hdfs.useLocalTimeStamp=true
    a1.sinks.k1.hdfs.callTimeout=10000

    # Use a channel which buffers events in memory
    a1.channels.c1.type=memory
    a1.channels.c1.capacity=1000
    a1.channels.c1.transactionCapacity=100

    # Bind the source and sink to the channel
    a1.sources.r1.channels=c1
    a1.sinks.k1.channel=c1
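
    test3 exposes an Avro source on port 44444 and writes incoming events to HDFS under /flume/data/%y-%m-%d as plain DataStream files, rolling on size (about 10 MB) rather than on time or event count. A possible way to run it and push a test event with Flume's built-in avro-client; the assumption here is that this agent runs on bigdatastorm (where the test2 sink points), and the file /tmp/test.txt is only an illustrative name:

    [root@bigdatastorm conf]# flume-ng agent --conf . --conf-file test3 --name a1 -Dflume.root.logger=INFO,console
    # From any machine that can reach the agent, send a local file as Avro events
    [root@bigdatacloud conf]# flume-ng avro-client --host bigdatastorm --port 44444 --filename /tmp/test.txt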

    ====================

    [root@bigdatacloud conf]# cat getnginxlog
    a1.sources=r1
    a1.sinks=k1
    a1.channels=c1

    # Describe/configure the source
    #a1.sources.r1.type=avro
    a1.sources.r1.type=exec
    #a1.sources.r1.bind=0.0.0.0
    #a1.sources.r1.port=44444
    a1.sources.r1.command=tail -F /opt/first_project/data/access.log

    # Describe the sink
    a1.sinks.k1.type=hdfs
    a1.sinks.k1.hdfs.path=hdfs://mycluster/flume/data1/%y-%m-%d
    a1.sinks.k1.hdfs.rollInterval=0
    a1.sinks.k1.hdfs.rollCount=0
    a1.sinks.k1.hdfs.rollSize=10240000
    a1.sinks.k1.hdfs.fileType=DataStream
    a1.sinks.k1.hdfs.idleTimeout=5
    a1.sinks.k1.hdfs.useLocalTimeStamp=true
    a1.sinks.k1.hdfs.callTimeout=10000

    # Use a channel which buffers events in memory
    a1.channels.c1.type=memory
    a1.channels.c1.capacity=1000
    a1.channels.c1.transactionCapacity=100

    # Bind the source and sink to the channel
    a1.sources.r1.channels=c1
    a1.sinks.k1.channel=c1
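
    getnginxlog uses an exec source to tail -F the Nginx access log at /opt/first_project/data/access.log and streams new lines into HDFS under /flume/data1/%y-%m-%d, with the same size-based rolling as test3. A minimal sketch of starting it and checking the output (the hdfs dfs listing is just one way to verify):

    [root@bigdatacloud conf]# flume-ng agent --conf . --conf-file getnginxlog --name a1 -Dflume.root.logger=INFO,console
    # After some traffic hits Nginx, confirm that files are appearing in HDFS
    [root@bigdatacloud conf]# hdfs dfs -ls /flume/data1/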

