soento-logback-kafka.xml (from soento-logback-kafka 1.2.2-RELEASE)
<?xml version="1.0" encoding="UTF-8"?>
<included>
    <appender name="KAFKA" class="com.soento.logback.kafka.KafkaAppender">
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>Asia/Shanghai</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "service_name": "${KAFKA_APP_NAME}",
                        "timestamp": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
                        "thread": "%thread",
                        "log_level": "%-5level",
                        "class_name": "%logger{50}",
                        "line_number": "%line",
                        "message": "%message%n",
                        "stack_trace": "%exception"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
        <topic>${KAFKA_TOPIC}</topic>
        <!-- We don't care how the log messages will be partitioned. -->
        <keyingStrategy class="com.soento.logback.kafka.keying.NoKeyKeyingStrategy"/>
        <!-- Use async delivery; application threads are not blocked by logging. -->
        <deliveryStrategy class="com.soento.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
        <!-- Each <producerConfig> translates to a regular kafka-client config (format: key=value). -->
        <!-- Producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
        <!-- bootstrap.servers is the only mandatory producerConfig. -->
        <producerConfig>bootstrap.servers=${KAFKA_SERVERS}</producerConfig>
        <!-- Don't wait for a broker to ack the reception of a batch. -->
        <producerConfig>acks=0</producerConfig>
        <!-- Wait up to 1000 ms and collect log messages before sending them as a batch. -->
        <producerConfig>linger.ms=1000</producerConfig>
        <!-- Even if the producer buffer runs full, do not block the application but start to drop messages. -->
        <producerConfig>max.block.ms=0</producerConfig>
        <!-- Define a client-id that identifies this producer to the kafka broker. -->
        <producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>
        <!-- There is no fallback <appender-ref>; if this appender cannot deliver, it will drop its messages. -->
    </appender>
</included>
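
This file is an <included> fragment, so it cannot be used on its own: a main logback configuration must pull it in and supply the ${KAFKA_APP_NAME}, ${KAFKA_TOPIC}, and ${KAFKA_SERVERS} properties it references, then attach the KAFKA appender to a logger. (${HOSTNAME} and ${CONTEXT_NAME} are defined automatically by logback.) Below is a minimal sketch of such a wrapper configuration; the property values, root level, and the assumption that the fragment sits on the classpath under the name soento-logback-kafka.xml are all illustrative, not part of the artifact.

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Illustrative values; define them to match your environment. -->
    <property name="KAFKA_APP_NAME" value="demo-service"/>
    <property name="KAFKA_TOPIC" value="app-logs"/>
    <property name="KAFKA_SERVERS" value="localhost:9092"/>

    <!-- Pull in the <included> fragment above (assumed to be on the classpath). -->
    <include resource="soento-logback-kafka.xml"/>

    <!-- Route all logging at INFO and above through the KAFKA appender. -->
    <root level="INFO">
        <appender-ref ref="KAFKA"/>
    </root>
</configuration>

Note that the properties are declared before the <include> so they are already resolved when the fragment's ${...} placeholders are substituted.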