/**
* Copyright (C) 2014 The logback-extensions developers ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ch.qos.logback.ext.loggly;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.management.ManagementFactory;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.Charset;
import java.sql.Timestamp;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import ch.qos.logback.ext.loggly.io.DiscardingRollingOutputStream;
import ch.qos.logback.ext.loggly.io.IoUtils;
import static ch.qos.logback.core.CoreConstants.LINE_SEPARATOR;
/**
* Logback batch appender for the Loggly HTTP API.
* <p>
* Note: Loggly's Syslog API is much more scalable than the HTTP API; the HTTP API should mostly be used in
* low-volume or non-production systems, but it can be very convenient for working around firewalls.
* <p>
* If the {@link LogglyBatchAppender} saturates and discards log messages, the following warning message is
* appended to both Loggly and {@link System#err}:
* "<code>$date - OutputStream is full, discard previous logs</code>"
*
* <h2>Configuration settings</h2>
* <table>
* <tr>
* <th>Property Name</th>
* <th>Type</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>inputKey</td>
* <td>String</td>
* <td>Loggly input key. Either <code>inputKey</code> or <code>endpointUrl</code> is required. Sample:
* "<code>12345678-90ab-cdef-1234-567890abcdef</code>"</td>
* </tr>
* <tr>
* <td>endpointUrl</td>
* <td>String</td>
* <td>Loggly HTTP API endpoint URL. Either <code>inputKey</code> or <code>endpointUrl</code> is required. Sample:
* "<code>https://logs.loggly.com/inputs/12345678-90ab-cdef-1234-567890abcdef</code>"</td>
* </tr>
* <tr>
* <td>pattern</td>
* <td>String</td>
* <td>Pattern used to format Loggly log messages. Default value:
* <code>%d{"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'",UTC} %-5level [%thread] %logger: %m%n</code></td>
* </tr>
* <tr>
* <td>proxyHost</td>
* <td>String</td>
* <td>Hostname of a proxy server. If blank, no proxy is used (see {@link URL#openConnection(java.net.Proxy)}).</td>
* </tr>
* <tr>
* <td>proxyPort</td>
* <td>int</td>
* <td>Port of a proxy server. Must be a valid int, but is ignored if <code>proxyHost</code> is blank or null.</td>
* </tr>
* <tr>
* <td>jmxMonitoring</td>
* <td>boolean</td>
* <td>Enables registration of a monitoring MBean named
* "<code>ch.qos.logback:type=LogglyBatchAppender,name=LogglyBatchAppender@#hashcode#</code>". Default: <code>true</code>.</td>
* </tr>
* <tr>
* <td>maxNumberOfBuckets</td>
* <td>int</td>
* <td>Max number of buckets in the byte buffer. Default value: <code>8</code>.</td>
* </tr>
* <tr>
* <td>maxBucketSizeInKilobytes</td>
* <td>int</td>
* <td>Max size of each bucket. Default value: <code>1024</code> kilobytes (1 MB).</td>
* </tr>
* <tr>
* <td>flushIntervalInSeconds</td>
* <td>int</td>
* <td>Interval between buffer flushes to the Loggly API. Default value: <code>3</code>.</td>
* </tr>
* <tr>
* <td>connReadTimeoutSeconds</td>
* <td>int</td>
* <td>How long the HTTP connection will wait on reads. Default value: <code>1</code> second.</td>
* </tr>
* </table>
* <p>
* The default configuration consumes up to 8 buckets of 1024 kilobytes (1 MB) each, i.e. at most 8 MB of heap,
* which is reasonable even for small JVMs. If logs are discarded, first try shortening the
* <code>flushIntervalInSeconds</code> parameter to "2s" or even "1s".
*
* <h2>Configuration Sample</h2>
* <pre>{@code
* <configuration scan="true" scanPeriod="30 seconds" debug="true">
*   <if condition='isDefined("logback.loggly.inputKey")'>
*     <then>
*       <appender name="loggly" class="ch.qos.logback.ext.loggly.LogglyBatchAppender">
*         <inputKey>${logback.loggly.inputKey}</inputKey>
*         <pattern>%d{yyyy/MM/dd HH:mm:ss,SSS} [${HOSTNAME}] [%thread] %-5level %logger{36} - %m %throwable{5}%n</pattern>
*         <proxyHost>${logback.loggly.proxy.host:-}</proxyHost>
*         <proxyPort>${logback.loggly.proxy.port:-8080}</proxyPort>
*         <debug>${logback.loggly.debug:-false}</debug>
*       </appender>
*       <root level="WARN">
*         <appender-ref ref="loggly"/>
*       </root>
*     </then>
*   </if>
* </configuration>
* }</pre>
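*
* <p>For reference, a minimal programmatic setup sketch (illustration only; the XML configuration
* above is the usual way to wire this appender). It assumes the {@code setInputKey} setter
* inherited from {@code AbstractLogglyAppender}, which backs the {@code inputKey} property
* documented above, and relies on the default {@code pattern}:</p>
* <pre>{@code
* LoggerContext ctx = (LoggerContext) LoggerFactory.getILoggerFactory();
* LogglyBatchAppender<ILoggingEvent> appender = new LogglyBatchAppender<ILoggingEvent>();
* appender.setContext(ctx);
* appender.setInputKey("12345678-90ab-cdef-1234-567890abcdef"); // assumed inherited setter
* appender.setFlushIntervalInSeconds(2);
* appender.start();
* ctx.getLogger(Logger.ROOT_LOGGER_NAME).addAppender(appender);
* }</pre>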
*
* <h2>Implementation decisions</h2>
* <ul>
* <li>Why buffer the generated log messages as bytes instead of using a
* {@code ch.qos.logback.core.read.CyclicBufferAppender} to buffer {@code ch.qos.logback.classic.spi.ILoggingEvent}s?
* Because it is much easier to control the size in memory.</li>
* <li>Why buffer in a byte array instead of writing directly to a {@link BufferedOutputStream} on the
* {@link HttpURLConnection}? Because the Loggly API may not cope well with such a streaming approach.</li>
* </ul>
*
* @author Cyrille Le Clerc
*/
public class LogglyBatchAppender<E> extends AbstractLogglyAppender<E> implements LogglyBatchAppenderMBean {
public static final String ENDPOINT_URL_PATH = "bulk/";
private boolean debug = false;
private int flushIntervalInSeconds = 3;
private DiscardingRollingOutputStream outputStream;
protected final AtomicLong sendDurationInNanos = new AtomicLong();
protected final AtomicLong sentBytes = new AtomicLong();
protected final AtomicInteger sendSuccessCount = new AtomicInteger();
protected final AtomicInteger sendExceptionCount = new AtomicInteger();
private ScheduledExecutorService scheduledExecutor;
private boolean jmxMonitoring = true;
private MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
private ObjectName registeredObjectName;
private int maxNumberOfBuckets = 8;
private int maxBucketSizeInKilobytes = 1024;
private Charset charset = Charset.forName("UTF-8");
/* Store Connection Read Timeout */
private int connReadTimeoutSeconds = 1;
@Override
protected void append(E eventObject) {
if (!isStarted()) {
return;
}
String msg = this.layout.doLayout(eventObject);
// Issue #21: Make sure messages end with new-line to delimit
// individual log events within the batch sent to loggly.
if (!msg.endsWith("\n")) {
msg += "\n";
}
try {
outputStream.write(msg.getBytes(charset));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public void start() {
// OUTPUTSTREAM
outputStream = new DiscardingRollingOutputStream(
maxBucketSizeInKilobytes * 1024,
maxNumberOfBuckets) {
@Override
protected void onBucketDiscard(ByteArrayOutputStream discardedBucket) {
if (isDebug()) {
addInfo("Discard bucket - " + getDebugInfo());
}
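// write the warning into the last filled bucket so it also reaches Loggly,
// in addition to the local warning emitted via addWarn()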
String s = new Timestamp(System.currentTimeMillis()) + " - OutputStream is full, discard previous logs" + LINE_SEPARATOR;
try {
getFilledBuckets().peekLast().write(s.getBytes(charset));
addWarn(s);
} catch (IOException e) {
addWarn("Exception appending warning message '" + s + "'", e);
}
}
@Override
protected void onBucketRoll(ByteArrayOutputStream rolledBucket) {
if (isDebug()) {
addInfo("Roll bucket - " + getDebugInfo());
}
}
};
// SCHEDULER
ThreadFactory threadFactory = new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = Executors.defaultThreadFactory().newThread(r);
thread.setName("logback-loggly-appender");
thread.setDaemon(true);
return thread;
}
};
scheduledExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
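// fixed delay (rather than fixed rate) measures the interval from the end of the
// previous flush, so slow uploads to Loggly cannot cause flush tasks to pile up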
scheduledExecutor.scheduleWithFixedDelay(new LogglyExporter(), flushIntervalInSeconds, flushIntervalInSeconds, TimeUnit.SECONDS);
// MONITORING
if (jmxMonitoring) {
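// identityHashCode keeps the ObjectName unique if several appender instances
// are registered on the same MBeanServer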
String objectName = "ch.qos.logback:type=LogglyBatchAppender,name=LogglyBatchAppender@" + System.identityHashCode(this);
try {
registeredObjectName = mbeanServer.registerMBean(this, new ObjectName(objectName)).getObjectName();
} catch (Exception e) {
addWarn("Exception registering mbean '" + objectName + "'", e);
}
}
// super.setOutputStream() must be defined before calling super.start()
super.start();
}
@Override
public void stop() {
scheduledExecutor.shutdown();
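// one final synchronous flush so buffered log entries are sent before teardown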
processLogEntries();
if (registeredObjectName != null) {
try {
mbeanServer.unregisterMBean(registeredObjectName);
} catch (Exception e) {
addWarn("Exception unRegistering mbean " + registeredObjectName, e);
}
}
try {
scheduledExecutor.awaitTermination(2 * this.flushIntervalInSeconds, TimeUnit.SECONDS);
} catch (InterruptedException e) {
addWarn("Exception waiting for termination of LogglyAppender scheduler", e);
}
// stop appender (ie close outputStream) after sending it to Loggly
outputStream.close();
super.stop();
}
/**
* Send log entries to Loggly
*/
@Override
public void processLogEntries() {
if (isDebug()) {
addInfo("Process log entries - " + getDebugInfo());
}
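// seal the partially filled current bucket so its contents are included in this flush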
outputStream.rollCurrentBucketIfNotEmpty();
BlockingDeque<ByteArrayOutputStream> filledBuckets = outputStream.getFilledBuckets();
ByteArrayOutputStream bucket;
while ((bucket = filledBuckets.poll()) != null) {
try {
InputStream in = new ByteArrayInputStream(bucket.toByteArray());
processLogEntries(in);
} catch (Exception e) {
addWarn("Internal error", e);
}
outputStream.recycleBucket(bucket);
}
}
/**
* Creates a configured HTTP connection to a URL (does not open the
* connection)
*
* @param url target URL
* @return the newly created HTTP connection
* @throws IOException connection error
*/
protected HttpURLConnection getHttpConnection(URL url) throws IOException {
HttpURLConnection conn;
if (proxy == null) {
conn = (HttpURLConnection) url.openConnection();
} else {
conn = (HttpURLConnection) url.openConnection(proxy);
}
conn.setDoOutput(true);
conn.setDoInput(true);
conn.setRequestProperty("Content-Type", layout.getContentType() + "; charset=" + charset.name());
conn.setRequestMethod("POST");
conn.setReadTimeout(getHttpReadTimeoutInMillis());
return conn;
}
/**
* Send log entries to Loggly
* @param in log input stream
*/
protected void processLogEntries(InputStream in) {
long nanosBefore = System.nanoTime();
try {
HttpURLConnection conn = getHttpConnection(new URL(endpointUrl));
// override the default read timeout set in getHttpConnection() with the configured value
conn.setReadTimeout(connReadTimeoutSeconds * 1000);
BufferedOutputStream out = new BufferedOutputStream(conn.getOutputStream());
long len = IoUtils.copy(in, out);
sentBytes.addAndGet(len);
out.flush();
out.close();
int responseCode = conn.getResponseCode();
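// read the response body eagerly: it feeds the error message below, and fully
// draining the stream lets the underlying connection be reused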
String response = super.readResponseBody(conn.getInputStream());
switch (responseCode) {
case HttpURLConnection.HTTP_OK:
case HttpURLConnection.HTTP_ACCEPTED:
sendSuccessCount.incrementAndGet();
break;
default:
sendExceptionCount.incrementAndGet();
addError("LogglyAppender server-side exception: " + responseCode + ": " + response);
}
// force url connection recycling
try {
conn.getInputStream().close();
conn.disconnect();
} catch (Exception e) {
// swallow exception
}
} catch (Exception e) {
sendExceptionCount.incrementAndGet();
addError("LogglyAppender client-side exception", e);
} finally {
sendDurationInNanos.addAndGet(System.nanoTime() - nanosBefore);
}
}
public int getFlushIntervalInSeconds() {
return flushIntervalInSeconds;
}
public void setFlushIntervalInSeconds(int flushIntervalInSeconds) {
this.flushIntervalInSeconds = flushIntervalInSeconds;
}
@Override
public long getSentBytes() {
return sentBytes.get();
}
@Override
public long getSendDurationInNanos() {
return sendDurationInNanos.get();
}
@Override
public int getSendSuccessCount() {
return sendSuccessCount.get();
}
@Override
public int getSendExceptionCount() {
return sendExceptionCount.get();
}
@Override
public int getDiscardedBucketsCount() {
return outputStream.getDiscardedBucketCount();
}
@Override
public long getCurrentLogEntriesBufferSizeInBytes() {
return outputStream.getCurrentOutputStreamSize();
}
public void setDebug(boolean debug) {
this.debug = debug;
}
public boolean isDebug() {
return debug;
}
public void setJmxMonitoring(boolean jmxMonitoring) {
this.jmxMonitoring = jmxMonitoring;
}
public void setMbeanServer(MBeanServer mbeanServer) {
this.mbeanServer = mbeanServer;
}
public void setMaxNumberOfBuckets(int maxNumberOfBuckets) {
this.maxNumberOfBuckets = maxNumberOfBuckets;
}
public void setMaxBucketSizeInKilobytes(int maxBucketSizeInKilobytes) {
this.maxBucketSizeInKilobytes = maxBucketSizeInKilobytes;
}
/**
* Setter used by Logback configuration to expose the HTTP connection read timeout.
*/
public void setConnReadTimeoutSeconds(int connReadTimeoutSeconds) {
this.connReadTimeoutSeconds = connReadTimeoutSeconds;
}
private String getDebugInfo() {
return "{" +
"sendDurationInMillis=" + TimeUnit.MILLISECONDS.convert(sendDurationInNanos.get(), TimeUnit.NANOSECONDS) +
", sendSuccessCount=" + sendSuccessCount +
", sendExceptionCount=" + sendExceptionCount +
", sentBytes=" + sentBytes +
", discardedBucketsCount=" + getDiscardedBucketsCount() +
", currentLogEntriesBufferSizeInBytes=" + getCurrentLogEntriesBufferSizeInBytes() +
'}';
}
public class LogglyExporter implements Runnable {
@Override
public void run() {
try {
processLogEntries();
} catch (Exception e) {
addWarn("Exception processing log entries", e);
}
}
}
@Override
protected String getEndpointPrefix() {
return ENDPOINT_URL_PATH;
}
}