• Logback: shipping Spring Boot logs to Kafka/ELK — Maven dependency plus a logback-spring.xml with console, rolling-file, and Kafka appenders


    <!-- Maven property (goes in the POM's <properties> section): version of the
         logback-kafka-appender library that provides KafkaAppender below. -->
    <logback-kafka-appender.version>0.2.0-RC2</logback-kafka-appender.version>
    
    
    <!-- Maven dependency (goes in the POM's <dependencies> section) for
         com.github.danielwegener.logback.kafka.KafkaAppender, referenced by the
         "kafkaAppender" in the logback configuration below.
         NOTE(review): this is a POM fragment pasted from a larger file — the
         ragged indentation is from the original blog post. -->
    <dependency>
                    <groupId>com.github.danielwegener</groupId>
                    <artifactId>logback-kafka-appender</artifactId>
                    <version>${logback-kafka-appender.version}</version>
                </dependency>
    <?xml version="1.0" encoding="UTF-8"?>
    <!--
        Spring Boot logback configuration (logback-spring.xml).
        Routes all logs to three sinks: console, a size/time-rolled file, and Kafka.
        Pattern fields (pipe-separated):
        app|host|timestamp|[thread]|level|logger|line|MDC(orgCode,bizType,jobId,lid,fromUuid,toUuid,traceId)|message
    -->
    <configuration>
        <include resource="org/springframework/boot/logging/logback/defaults.xml"/>

        <property name="ENCODER_PATTERN"
                  value="pi|${HOSTNAME}|%d{yyyy-MM-dd HH:mm:ss.SSS}|[%thread]|%-5level|%logger{80}|%L|%X{orgCode}|%X{bizType}|%X{jobId}|%X{lid}|%X{fromUuid}|%X{toUuid}|%X{traceId}|%msg%n"/>

        <!-- Values pulled from the Spring environment, with fallbacks for local runs. -->
        <springProperty scope="context" name="service" source="spring.application.name" defaultValue="UnknownService"/>
        <springProperty scope="context" name="env" source="elk.env" defaultValue="test"/>
        <springProperty scope="context" name="bootstrapServers" source="elk.kafka.bootstrap.servers" defaultValue="10.12.0.33:9092,10.12.0.32:9092,10.12.0.34:9092"/>

        <!-- NOTE(review): springAppName is not referenced elsewhere in this file — confirm before removing. -->
        <springProperty scope="context" name="springAppName" source="spring.application.name"/>

        <appender name="consoleAppender" class="ch.qos.logback.core.ConsoleAppender">
            <encoder>
                <pattern>${ENCODER_PATTERN}</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>

        <!-- Rolls daily AND at 100MB per file; keeps 30 days, capped at 50GB total.
             Uses SizeAndTimeBasedRollingPolicy instead of the deprecated
             TimeBasedRollingPolicy + SizeAndTimeBasedFNATP combination. -->
        <appender name="rollingFileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
            <file>${LOG_PATH}/pi.log</file>
            <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
                <!-- %i is the size-based index within one day; required by this policy. -->
                <fileNamePattern>${LOG_PATH}/pi.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
                <maxFileSize>100MB</maxFileSize>
                <maxHistory>30</maxHistory>
                <totalSizeCap>50GB</totalSizeCap>
            </rollingPolicy>
            <encoder>
                <pattern>${ENCODER_PATTERN}</pattern>
                <charset>UTF-8</charset>
            </encoder>
        </appender>

        <appender name="ASYNC-rollingFileAppender" class="ch.qos.logback.classic.AsyncAppender">
            <!-- 0 = never discard. By default, when the queue is 80% full,
                 TRACE/DEBUG/INFO events are dropped. -->
            <discardingThreshold>0</discardingThreshold>
            <!-- Queue depth; affects throughput vs. memory. Default is 256. -->
            <queueSize>256</queueSize>
            <!-- AsyncAppender accepts exactly one delegate appender. -->
            <appender-ref ref="rollingFileAppender"/>
        </appender>

        <!-- Ships log lines to the "edgex" Kafka topic for ELK ingestion.
             acks=0 + max.block.ms=0 means fire-and-forget: logging never blocks
             the application, at the cost of possible log loss when Kafka is down. -->
        <appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
            <encoder>
                <pattern>${ENCODER_PATTERN}</pattern>
                <charset>UTF-8</charset>
            </encoder>
            <topic>edgex</topic>
            <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
            <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
            <producerConfig>acks=0</producerConfig>
            <producerConfig>linger.ms=1000</producerConfig>
            <producerConfig>max.block.ms=0</producerConfig>
            <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
        </appender>

        <appender name="ASYNC-kafkaAppender" class="ch.qos.logback.classic.AsyncAppender">
            <!-- 0 = never discard. By default, when the queue is 80% full,
                 TRACE/DEBUG/INFO events are dropped. -->
            <discardingThreshold>0</discardingThreshold>
            <!-- Queue depth; affects throughput vs. memory. Default is 256. -->
            <queueSize>256</queueSize>
            <!-- AsyncAppender accepts exactly one delegate appender. -->
            <appender-ref ref="kafkaAppender"/>
        </appender>

        <!-- Quiet noisy Netflix/OSS components down to WARN; additivity=false
             prevents double-logging through the root logger. -->
        <logger name="com.netflix" level="WARN" additivity="false">
            <appender-ref ref="consoleAppender"/>
            <appender-ref ref="ASYNC-rollingFileAppender"/>
            <appender-ref ref="ASYNC-kafkaAppender"/>
        </logger>

        <!-- Full request/response tracing for the HTTP logging interceptor. -->
        <logger name="com.be.pi.interceptor.RequestResponseLoggingInterceptor" level="DEBUG" additivity="false">
            <appender-ref ref="consoleAppender"/>
            <appender-ref ref="ASYNC-rollingFileAppender"/>
            <appender-ref ref="ASYNC-kafkaAppender"/>
        </logger>

        <root level="info">
            <appender-ref ref="consoleAppender"/>
            <appender-ref ref="ASYNC-rollingFileAppender"/>
            <appender-ref ref="ASYNC-kafkaAppender"/>
        </root>

    </configuration>
  • 相关阅读:
    进程和程序
    linux socket基本知识
    window核心编程 第五章 作业
    树的基本操作(C语言)
    每天都在反省自己,但是每天却都浑浑噩噩
    Windows核心编程 内核对象
    还没完整看过一本技术的书籍啊
    管道
    Memory Layout of a C Program(7.6)
    cpio命令用法
  • 原文地址:https://www.cnblogs.com/exmyth/p/15870185.html
Copyright © 2020-2023  润新知