
edu.byu.hbll.logback.kafka.KafkaAppender Maven / Gradle / Ivy
package edu.byu.hbll.logback.kafka;
import ch.qos.logback.classic.PatternLayout;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.AppenderBase;
import java.io.IOException;
import java.io.StringReader;
import java.util.Objects;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;
/**
* Logback Appender for Kafka.
*
* This class provides an implementation of the Logback Appender that sends logs to kafka.
* Another application may then read the logs from kafka.
*
 *
 * <p>Every component of this appender is configurable from logback.xml. By default, messages sent
* to kafka are compressed with lz4 compression, have a 64k batch size, and have a 100ms batch
* delay.
*
 *
 * <p>By default, the internal KafkaProducer will also block for only 100ms, after which the message
* will be sent to std:out. The internal KafkaProducer is designed to add messages to a buffer and
* immediately return. If the buffer is full or a connection to the kafka brokers cannot be
* established, the KafkaProducer will block for up to 100ms before giving up and sending the
* message to std:out.
*
* @author rodzi
*/
public class KafkaAppender extends AppenderBase {
public static final int DEFAULT_MAX_BATCH_DELAY = 100;
public static final int DEFAULT_MAX_BATCH_SIZE = 65536;
public static final String DEFAULT_COMPRESSION_TYPE = "lz4";
public static final int DEFAULT_MAX_BLOCK_MS = 100;
private int batchSize = DEFAULT_MAX_BATCH_SIZE;
private int maxBatchDelay = DEFAULT_MAX_BATCH_DELAY;
private int maxBlockMillis = DEFAULT_MAX_BLOCK_MS;
private String compressionType = DEFAULT_COMPRESSION_TYPE;
private boolean useSsl = false;
private String kafkaServers;
private String kafkaProducerProperties;
private String topic;
private String logName;
private PatternLayout layout;
private KafkaProducer producer;
@Override
public void start() {
super.start();
Properties properties = new Properties();
properties.put(
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
properties.put(
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
properties.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
properties.put(ProducerConfig.LINGER_MS_CONFIG, maxBatchDelay);
properties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMillis);
if (kafkaServers != null) {
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
}
if (useSsl) {
properties.put("security.protocol", "SSL");
}
if (kafkaProducerProperties != null) {
StringReader sreader = new StringReader(kafkaProducerProperties);
try {
properties.load(sreader);
} catch (IOException e) {
addError("Error initializing KafkaAppender");
addError(e.toString(), e);
throw new IllegalStateException("Error initializing KafkaAppender", e);
}
}
producer = new KafkaProducer<>(properties);
Objects.requireNonNull(topic, "Topic must not be null. Add this to the appender configuration");
}
@Override
public void stop() {
super.stop();
producer.close();
layout.stop();
}
@Override
protected void append(ILoggingEvent event) {
ProducerRecord record =
new ProducerRecord<>(topic, logName, layout.doLayout(event));
try {
producer.send(record);
} catch (TimeoutException e) {
addError("Kafka send timeout. Message: " + layout.doLayout(event));
} catch (Exception e) {
addError("KafkaAppender: Error sending message - " + e + " - " + e.getMessage());
e.printStackTrace();
}
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public String getKafkaProducerProperties() {
return kafkaProducerProperties;
}
public void setKafkaProducerProperties(String kafkaProducerProperties) {
this.kafkaProducerProperties = kafkaProducerProperties;
}
public String getLogName() {
return logName;
}
public void setLogName(String logName) {
this.logName = logName;
}
public PatternLayout getLayout() {
return layout;
}
public void setLayout(PatternLayout layout) {
this.layout = layout;
}
public String getKafkaServers() {
return kafkaServers;
}
public void setKafkaServers(String kafkaServers) {
this.kafkaServers = kafkaServers;
}
public int getBatchSize() {
return batchSize;
}
public void setBatchSize(int batchSize) {
this.batchSize = batchSize;
}
public int getMaxBatchDelay() {
return maxBatchDelay;
}
public void setMaxBatchDelay(int maxBatchDelay) {
this.maxBatchDelay = maxBatchDelay;
}
public int getMaxBlockMillis() {
return maxBlockMillis;
}
public void setMaxBlockMillis(int maxBlockMillis) {
this.maxBlockMillis = maxBlockMillis;
}
public String getCompressionType() {
return compressionType;
}
public void setCompressionType(String compressionType) {
this.compressionType = compressionType;
}
public boolean isUseSsl() {
return useSsl;
}
public void setUseSsl(boolean useSsl) {
this.useSsl = useSsl;
}
}