
edu.byu.hbll.logback.kafka.KafkaAppender

package edu.byu.hbll.logback.kafka;

import ch.qos.logback.classic.PatternLayout;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.AppenderBase;
import java.io.IOException;
import java.io.StringReader;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

/**
 * Logback Appender for Kafka.
 *
 * <p>This class provides an implementation of the Logback Appender that sends logs to kafka.
 * Another application may then read the logs from kafka.
 *
 * <p>Every component of this appender is configurable from logback.xml. By default, messages sent
 * to kafka are compressed with lz4 compression, have a 64k batch size, and have a 100ms batch
 * delay.
 *
 * <p>The KafkaAppender uses an internal queue for storing messages waiting to be transmitted to
 * Kafka. This allows calls to append() to return immediately, instead of blocking until the
 * message is sent to kafka. This is critical with newer versions of the kafka-clients because the
 * KafkaProducer will itself send messages to the log. Without the queue, the KafkaProducer ends up
 * waiting on the Appender to log something, which itself is waiting on the KafkaProducer to finish
 * sending a message. If the queue is full, logging events will be sent to std:out.
 *
 * <p>By default, the internal KafkaProducer will only block for 1000ms, after which the message
 * will be sent to std:out. This allows the KafkaAppender to interrupt and close the KafkaProducer
 * if a shutdown signal is received.
 */
public class KafkaAppender extends AppenderBase<ILoggingEvent> {

  public static final int DEFAULT_MAX_BATCH_DELAY = 100;
  public static final int DEFAULT_MAX_BATCH_SIZE = 65536;
  public static final String DEFAULT_COMPRESSION_TYPE = "lz4";
  public static final int DEFAULT_MAX_BLOCK_MS = 1000;
  public static final int DEFAULT_QUEUE_CAPACITY = 10000;

  private int batchSize = DEFAULT_MAX_BATCH_SIZE;
  private int maxBatchDelay = DEFAULT_MAX_BATCH_DELAY;
  private int maxBlockMillis = DEFAULT_MAX_BLOCK_MS;
  private String compressionType = DEFAULT_COMPRESSION_TYPE;
  private int queueCapacity = DEFAULT_QUEUE_CAPACITY;
  private boolean useSsl = false;
  private String kafkaServers;
  private String kafkaProducerProperties;
  private String topic;
  private String logName;
  private PatternLayout layout;

  private KafkaProducer<String, String> producer;
  private BlockingQueue<String> messages;
  private Thread thread;

  // Set to false by stop(); volatile so the sender thread sees the change.
  private volatile boolean run = true;

  @Override
  public void start() {
    super.start();

    // Debugging info sent to std:out in case kafka isn't working.
    System.out.println("Creating KafkaAppender - ");
    System.out.println("\tbatchSize - " + batchSize);
    System.out.println("\tmaxBatchDelay - " + maxBatchDelay);
    System.out.println("\tmaxBlockMillis - " + maxBlockMillis);
    System.out.println("\tcompressionType - " + compressionType);
    System.out.println("\tqueueCapacity - " + queueCapacity);
    System.out.println("\tuseSsl - " + useSsl);
    System.out.println("\tkafkaServers - " + kafkaServers);
    System.out.println("\tkafkaProducerProperties - " + kafkaProducerProperties);
    System.out.println("\ttopic - " + topic);
    System.out.println("\tlogName - " + logName);

    // Construct the properties configuration for the KafkaProducer.
    Properties properties = new Properties();
    properties.put(
        ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    properties.put(
        ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    properties.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
    properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
    properties.put(ProducerConfig.LINGER_MS_CONFIG, maxBatchDelay);
    properties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMillis);

    if (kafkaServers != null) {
      properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
    }

    if (useSsl) {
      properties.put("security.protocol", "SSL");
    }

    if (kafkaProducerProperties != null) {
      StringReader sreader = new StringReader(kafkaProducerProperties);

      try {
        properties.load(sreader);
      } catch (IOException e) {
        System.out.println("Error initializing KafkaAppender");
        e.printStackTrace();
        throw new IllegalStateException("Error initializing KafkaAppender", e);
      }
    }

    Objects.requireNonNull(topic, "Topic must not be null. Add this to the appender configuration");

    producer = new KafkaProducer<>(properties);
    messages = new LinkedBlockingQueue<>(queueCapacity);

    // For some reason, the first message sent to kafka gets missed. This fixes that.
    messages.add("");

    // Construct the thread that sends messages to kafka.
    thread =
        Executors.defaultThreadFactory()
            .newThread(
                () -> {
                  String message = null;

                  while (run) {
                    try {
                      message = messages.take();
                      ProducerRecord<String, String> record =
                          new ProducerRecord<>(topic, logName, message);
                      producer.send(record);
                    } catch (InterruptedException e) {
                      // Do Nothing
                    } catch (Exception e) {
                      // Send the failed message to std:out and the exception to std:err
                      System.out.println("Error sending message to kafka - " + message);
                      e.printStackTrace(System.err);
                    }
                  }

                  producer.close();
                });

    thread.start();
  }

  @Override
  public void stop() {
    super.stop();
    run = false;
    thread.interrupt();
    layout.stop();
  }

  @Override
  protected void append(ILoggingEvent event) {
    String message = layout.doLayout(event);

    // Try adding the message to the queue. If it's full, send it to std:out.
    if (!messages.offer(message)) {
      System.out.println("KafkaAppender Queue Full. Message - " + message);
    }
  }

  public String getTopic() {
    return topic;
  }

  public void setTopic(String topic) {
    this.topic = topic;
  }

  public String getKafkaProducerProperties() {
    return kafkaProducerProperties;
  }

  public void setKafkaProducerProperties(String kafkaProducerProperties) {
    this.kafkaProducerProperties = kafkaProducerProperties;
  }

  public String getLogName() {
    return logName;
  }

  public void setLogName(String logName) {
    this.logName = logName;
  }

  public PatternLayout getLayout() {
    return layout;
  }

  public void setLayout(PatternLayout layout) {
    this.layout = layout;
  }

  public String getKafkaServers() {
    return kafkaServers;
  }

  public void setKafkaServers(String kafkaServers) {
    this.kafkaServers = kafkaServers;
  }

  public int getBatchSize() {
    return batchSize;
  }

  public void setBatchSize(int batchSize) {
    this.batchSize = batchSize;
  }

  public int getMaxBatchDelay() {
    return maxBatchDelay;
  }

  public void setMaxBatchDelay(int maxBatchDelay) {
    this.maxBatchDelay = maxBatchDelay;
  }

  public int getMaxBlockMillis() {
    return maxBlockMillis;
  }

  public void setMaxBlockMillis(int maxBlockMillis) {
    this.maxBlockMillis = maxBlockMillis;
  }

  public String getCompressionType() {
    return compressionType;
  }

  public void setCompressionType(String compressionType) {
    this.compressionType = compressionType;
  }

  public boolean isUseSsl() {
    return useSsl;
  }

  public void setUseSsl(boolean useSsl) {
    this.useSsl = useSsl;
  }

  public int getQueueCapacity() {
    return queueCapacity;
  }

  public void setQueueCapacity(int queueCapacity) {
    this.queueCapacity = queueCapacity;
  }
}
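Since the Javadoc says every component is configurable from logback.xml, here is a minimal logback.xml sketch, assuming Logback's usual convention that nested elements map to the setters defined above. The broker list, topic, logName, pattern, and extra producer properties are placeholder values, not part of the source.

<configuration>
  <appender name="KAFKA" class="edu.byu.hbll.logback.kafka.KafkaAppender">
    <!-- Placeholder connection values -->
    <kafkaServers>broker1:9092,broker2:9092</kafkaServers>
    <topic>application-logs</topic>
    <logName>my-service</logName>
    <!-- Extra KafkaProducer settings in java.util.Properties format (key=value per line); hypothetical values -->
    <kafkaProducerProperties>acks=1</kafkaProducerProperties>
    <layout class="ch.qos.logback.classic.PatternLayout">
      <pattern>%d{ISO8601} %-5level %logger - %msg%n</pattern>
    </layout>
  </appender>

  <root level="INFO">
    <appender-ref ref="KAFKA" />
  </root>
</configuration>

Any other setter on the class (batchSize, maxBatchDelay, maxBlockMillis, compressionType, queueCapacity, useSsl) can be set the same way; unset values fall back to the defaults listed in the class.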

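The appender can also be wired programmatically, which is convenient in tests. The following is a sketch under assumptions: a standard Logback classic LoggerContext obtained through SLF4J, and placeholder broker, topic, and logName values.

import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.PatternLayout;
import edu.byu.hbll.logback.kafka.KafkaAppender;
import org.slf4j.LoggerFactory;

public class KafkaAppenderExample {

  public static void main(String[] args) {
    LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();

    // The appender formats events with the PatternLayout given to setLayout().
    PatternLayout layout = new PatternLayout();
    layout.setContext(context);
    layout.setPattern("%d{ISO8601} %-5level %logger - %msg%n");
    layout.start();

    KafkaAppender appender = new KafkaAppender();
    appender.setContext(context);
    appender.setKafkaServers("localhost:9092"); // placeholder broker address
    appender.setTopic("application-logs"); // placeholder topic
    appender.setLogName("my-service"); // placeholder record key
    appender.setLayout(layout);
    appender.start(); // creates the KafkaProducer and the sender thread

    Logger root = context.getLogger(Logger.ROOT_LOGGER_NAME);
    root.addAppender(appender);

    // Messages are queued by append() and sent to kafka by the background thread.
    root.info("hello kafka");
  }
}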