// This file is part of OpenTSDB.
// Copyright (C) 2018 The OpenTSDB Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net.opentsdb.tsd;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.TimerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.RateLimiter;
import joptsimple.internal.Strings;
import net.opentsdb.data.deserializers.Deserializer;
import net.opentsdb.utils.PluginLoader;
/**
* A group of consumers that pull from a list of one or more topics. This group
* monitors the running consumers periodically and restarts them as necessary.
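* <p>
* Per-group settings are read from configuration keys of the form
* {@code KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE + groupID + ".topics"},
* {@code ".consumerType"} and {@code ".deserializer"} (all required), plus
* the optional {@code ".rate"} and {@code ".threads"} overrides.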
*/
public class KafkaRpcPluginGroup implements TimerTask {
private static final Logger LOG = LoggerFactory.getLogger(
KafkaRpcPluginGroup.class);
/**
* An enumeration that maps TSDB consumer types to strings. This lets the
* consumer know how to process the messages from its topics.
*/
public enum TsdbConsumerType {
RAW("Raw"),
REQUEUE_RAW("RequeueRaw"),
ROLLUP("Rollup"),
REQUEUE_ROLLUP("RequeueRollup"),
UID_ABUSE("UIDAbuse");
final String name;
TsdbConsumerType(final String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
/** The plugin config shared with the parent. */
private final KafkaRpcPluginConfig config;
/** The ID used to subscribe to Kafka as a consumer group. */
private final String group_id;
/** Rate limiter shared by all consumer threads in this group. */
private final RateLimiter rate_limiter;
/** The consumer threads belonging to this group. */
private final List<KafkaRpcPluginThread> kafka_consumers;
/** How many consumer threads to run. */
private final int num_threads;
/** The topic filter the consumers subscribe to. */
private final String topics;
/** Timer used to schedule the periodic thread monitor. */
private final HashedWheelTimer timer;
/** Thread pool the consumers execute in. */
private final ExecutorService pool;
/** The type of data the consumers expect to read. */
private final TsdbConsumerType consumer_type;
/** The parent plugin that owns this group. */
private final KafkaRpcPlugin parent;
/** Counters aggregated across all threads. */
private final AtomicLong restarts = new AtomicLong();
private final AtomicLong rebalance_failures = new AtomicLong();
/** The deserializer instantiated for this group. */
private Deserializer deserializer = null;
/** The currently configured rate; may be 0 to disable consumption. */
private double current_rate;
/**
* Default ctor.
* @param parent The parent object to pull shared objects from
* @param groupID The group ID used to subscribe to Kafka
* @throws IllegalArgumentException if the group ID, topics, consumer type or
* deserializer configuration is missing or invalid
*/
public KafkaRpcPluginGroup(final KafkaRpcPlugin parent, final String groupID) {
this.parent = parent;
this.group_id = groupID;
config = parent.getConfig();
timer = (HashedWheelTimer) parent.getTSDB().getTimer();
pool = parent.getKafkaPool();
if (groupID == null || groupID.isEmpty()) {
throw new IllegalArgumentException("Missing group name");
}
topics = config.getString(
KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE + groupID + ".topics");
if (topics == null || topics.isEmpty()) {
throw new IllegalArgumentException("Empty topic filter for group " + groupID);
}
final String type = config.getString(KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE
+ groupID + ".consumerType");
if (type == null || type.isEmpty()) {
throw new IllegalArgumentException("Missing consumer type for group " + groupID);
}
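// valueOf() throws an IllegalArgumentException if the configured string
// doesn't match a TsdbConsumerType value.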
consumer_type = TsdbConsumerType.valueOf(type.toUpperCase());
if (config.hasProperty(KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE + groupID + ".rate")) {
current_rate = config.getInt(KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE +
groupID + ".rate");
if (current_rate > 0) {
// can't set the rate to zero
rate_limiter = RateLimiter.create(current_rate);
} else {
rate_limiter = RateLimiter.create(KafkaRpcPluginConfig.DEFAULT_CONSUMER_RATE);
}
} else {
current_rate = KafkaRpcPluginConfig.DEFAULT_CONSUMER_RATE;
rate_limiter = RateLimiter.create(KafkaRpcPluginConfig.DEFAULT_CONSUMER_RATE);
}
num_threads =
config.hasProperty(KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE + groupID + ".threads")
? config.getInt(KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE + groupID + ".threads")
: KafkaRpcPluginConfig.DEFAULT_CONSUMER_THREADS;
kafka_consumers = new ArrayList<KafkaRpcPluginThread>(num_threads);
final String deser_class = config.getString(
KafkaRpcPluginConfig.PLUGIN_PROPERTY_BASE + groupID + ".deserializer");
if (Strings.isNullOrEmpty(deser_class)) {
throw new IllegalArgumentException("Deserializer class cannot be null or empty.");
}
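// Two-stage lookup: check the local class path first, then fall back to
// the TSDB plugin loader if the class isn't found.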
try {
// Class.forName() throws rather than returning null, so no null check is needed.
final Class<?> clazz = Class.forName(deser_class);
deserializer = (Deserializer) clazz.getDeclaredConstructor().newInstance();
} catch (ClassNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("No class [" + deser_class + "] found on the class path, "
+ "trying the plugin loader.");
}
} catch (InstantiationException | IllegalAccessException
| IllegalArgumentException | InvocationTargetException
| NoSuchMethodException | SecurityException e) {
LOG.warn("Found class [" + deser_class
+ "] but failed to instantiate it:", e);
}
if (deserializer == null) {
deserializer = PluginLoader.loadSpecificPlugin(deser_class, Deserializer.class);
}
if (deserializer == null) {
throw new IllegalArgumentException("Unable to find a deserializer "
+ "for class [" + deser_class + "]");
}
for (int i = 0; i < num_threads; i++) {
kafka_consumers.add(new KafkaRpcPluginThread(this, i, topics));
}
timer.newTimeout(this, config.threadCheckInterval(), TimeUnit.MILLISECONDS);
}
@Override
public String toString() {
final StringBuilder buf = new StringBuilder();
buf.append("groupID=")
.append(group_id)
.append(", type=")
.append(consumer_type)
.append(", currentRate=")
.append(current_rate)
.append(", numThreads=")
.append(num_threads)
.append(", topics=")
.append(topics);
return buf.toString();
}
/**
* Starts the consumer threads on the shared thread pool. No-op if the
* group's rate is zero.
*/
public void start() {
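// a rate of zero disables the group, so only submit the consumers to the
// pool when consumption is allowed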
if (current_rate > 0) {
for (final Thread t : kafka_consumers) {
pool.execute(t);
}
}
}
/** Gracefully shuts down all of the consumer threads */
public void shutdown() {
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
consumer.shutdown();
}
}
/** @return the group ID to use as the consumer group */
public String getGroupID() {
return group_id;
}
/** @return the rate limiter for this group. Shared amongst partitions */
public RateLimiter getRateLimiter() {
return rate_limiter;
}
/** @return the parent object that controls this group */
public KafkaRpcPlugin getParent() {
return parent;
}
/** @return the consumer type */
public TsdbConsumerType getConsumerType() {
return consumer_type;
}
/** Increments the rebalance failure counter */
public void incrementRebalanceFailures() {
rebalance_failures.incrementAndGet();
}
/** @return the number of thread restarts for all threads */
public long getRestarts() {
return restarts.get();
}
/** @return the number of rebalance failures across all threads */
public long getRebalanceFailures() {
return rebalance_failures.get();
}
/** @return the number of messages received across all threads */
public long getMessagesReceived() {
long temp = 0;
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
temp += consumer.getMessagesReceived();
}
return temp;
}
/** @return the number of datapoints received across all threads */
public long getDatapointsReceived() {
long temp = 0;
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
temp += consumer.getDatapointsReceived();
}
return temp;
}
/** @return the number of seconds spent waiting on the rate limiter */
public double getCumulativeRateDelay() {
double temp = 0;
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
temp += consumer.getCumulativeRateDelay();
}
return temp;
}
/** @return the number of milliseconds spent waiting on messages from Kafka */
public double getKafkaWaitTime() {
double temp = 0;
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
temp += consumer.getKafkaWaitTime();
}
return temp;
}
/** @return the number of deserialization errors across all threads */
public long getDeserializationErrors() {
long temp = 0;
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
temp += consumer.getDeserializationErrors();
}
return temp;
}
/**
* Populates the given map with the aggregate of counters across all threads.
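* Values already present in the map are summed with the per-thread counts.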
* @param counters The map to populate with results
*/
public void getNamespaceCounters(final Map<String, Map<String, Long>> counters) {
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
final Map<String, Map<String, AtomicLong>> threadCounters =
consumer.getNamespaceCounters();
for (final Entry<String, Map<String, AtomicLong>> ctr :
threadCounters.entrySet()) {
Map<String, Long> tempMap = counters.get(ctr.getKey());
if (tempMap == null) {
tempMap = new HashMap<String, Long>(1);
counters.put(ctr.getKey(), tempMap);
}
for (final Entry<String, AtomicLong> namespace : ctr.getValue().entrySet()) {
final Long value = tempMap.get(namespace.getKey());
if (value == null) {
tempMap.put(namespace.getKey(), namespace.getValue().get());
} else {
tempMap.put(namespace.getKey(), value + namespace.getValue().get());
}
}
}
}
}
/** @return a map of per thread statistics */
public Map<Integer, Map<String, Double>> getPerThreadMetrics() {
final Map<Integer, Map<String, Double>> map =
new HashMap<Integer, Map<String, Double>>();
for (final KafkaRpcPluginThread consumer : kafka_consumers) {
final Map<String, Double> thread = new HashMap<String, Double>();
thread.put("messagesReceived", (double)consumer.getMessagesReceived());
thread.put("datapointsReceived", (double)consumer.getDatapointsReceived());
thread.put("cumulativeRateDelay", consumer.getCumulativeRateDelay());
thread.put("kafkaWaitTime", consumer.getKafkaWaitTime());
map.put(consumer.threadID(), thread);
}
return map;
}
/** @return the currently configured rate of the group */
public double getRate() {
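// the limiter can't store a zero rate, so report 0 directly when the
// group has been disabled via setRate(0)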
return current_rate == 0 ? 0 : rate_limiter.getRate();
}
/**
* Sets the rate limit for the group. If the rate is zero, all consumer threads
* are killed. If the rate is greater than zero then any stopped consumer threads
* will be restarted at the next monitoring interval.
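* E.g. {@code setRate(0)} halts all consumption for the group, and a later
* {@code setRate(500)} allows the monitor task to revive the threads.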
* @param rate The rate to set. Must be zero or greater.
*/
public void setRate(final double rate) {
if (rate < 0) {
throw new IllegalArgumentException("Rate cannot be less than zero: " + rate);
}
current_rate = rate;
if (rate == 0) {
// kill the threads!
LOG.info("The rate has been set to zero for " + this + ". Killing threads.");
for (final KafkaRpcPluginThread writer : kafka_consumers) {
try {
writer.shutdown();
} catch (Exception e) {
LOG.error("Exception shutting down thread " + writer, e);
}
}
} else {
// limiter requires a positive value, can't set it to 0
rate_limiter.setRate(rate);
}
}
/** @return The deserializer for this group. */
public Deserializer getDeserializer() {
return deserializer;
}
/**
* Responsible for restarting dead threads. If the current rate is set to zero
* then the threads will not be restarted.
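* The task reschedules itself every {@code config.threadCheckInterval()}
* milliseconds.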
*/
@Override
public void run(final Timeout timeout) throws Exception {
if (LOG.isDebugEnabled()) {
LOG.debug("Running thread monitor on " + this);
}
try {
// only check threads if the rate is greater than zero
if (current_rate > 0) {
for (final KafkaRpcPluginThread writer : kafka_consumers) {
if (LOG.isDebugEnabled()) {
LOG.debug("Writer [" + writer + "] thread state: " + writer.isThreadRunning());
}
if (!writer.isThreadRunning()) {
LOG.warn("Writer [" + writer + "] was terminated, restarting");
pool.execute(writer);
restarts.incrementAndGet();
}
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Consumer group " + this + " has a rate limit of 0, not running");
}
}
} catch (Exception e) {
LOG.error("Failure while monitoring threads for group: " + this, e);
} catch (Throwable e) {
LOG.error("Fatal exception in group thread: " + this, e);
System.exit(1);
}
timer.newTimeout(this, config.threadCheckInterval(), TimeUnit.MILLISECONDS);
}
}