// org.apache.camel.component.kafka.KafkaProducer — Maven / Gradle / Ivy (source-listing residue, commented out)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.kafka;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.camel.AsyncCallback;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.apache.camel.component.kafka.producer.support.DelegatingCallback;
import org.apache.camel.component.kafka.producer.support.KafkaProducerCallBack;
import org.apache.camel.component.kafka.producer.support.KafkaProducerMetadataCallBack;
import org.apache.camel.component.kafka.producer.support.KeyValueHolderIterator;
import org.apache.camel.component.kafka.producer.support.ProducerUtil;
import org.apache.camel.component.kafka.producer.support.PropagatedHeadersProvider;
import org.apache.camel.component.kafka.serde.KafkaHeaderSerializer;
import org.apache.camel.health.HealthCheckHelper;
import org.apache.camel.health.WritableHealthCheckRepository;
import org.apache.camel.spi.HeaderFilterStrategy;
import org.apache.camel.support.DefaultAsyncProducer;
import org.apache.camel.support.SynchronizationAdapter;
import org.apache.camel.util.KeyValueHolder;
import org.apache.camel.util.ObjectHelper;
import org.apache.camel.util.ReflectionHelper;
import org.apache.camel.util.URISupport;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.NetworkClient;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.camel.component.kafka.producer.support.ProducerUtil.tryConvertToSerializedType;
/**
 * Camel producer that publishes exchanges to Apache Kafka topics, supporting both
 * synchronous and asynchronous (worker-pool based) sending as well as Kafka transactions.
 */
public class KafkaProducer extends DefaultAsyncProducer {
private static final Logger LOG = LoggerFactory.getLogger(KafkaProducer.class);
// the underlying Kafka client; rawtypes because key/value types are configured at runtime
@SuppressWarnings("rawtypes")
private org.apache.kafka.clients.producer.Producer kafkaProducer;
// optional health-check integration (registered only when a repository is available)
private KafkaProducerHealthCheck producerHealthCheck;
private WritableHealthCheckRepository healthCheckRepository;
// resolved in doStart(): from properties, or via reflection from the Kafka client, or ""
private String clientId;
// non-null only when transactional.id is configured; gates initTransactions()
private String transactionId;
private final KafkaEndpoint endpoint;
private final KafkaConfiguration configuration;
// used for asynchronous sends; may be caller-supplied (not shut down here) or created by this producer
private ExecutorService workerPool;
// true only when this producer created the worker pool and must shut it down in doStop()
private boolean shutdownWorkerPool;
// true only when this producer created the Kafka client (vs. injected via setKafkaProducer)
private volatile boolean closeKafkaProducer;
// topic taken from the endpoint URI path; cached at construction time
private final String endpointTopic;
// cached configuration values to avoid repeated lookups on the hot send path
private final Integer configPartitionKey;
private final String configKey;
/**
 * Creates a producer bound to the given Kafka endpoint, caching the endpoint topic
 * (the URI remainder path) and frequently used configuration values up front.
 *
 * @param endpoint the owning Kafka endpoint
 */
public KafkaProducer(KafkaEndpoint endpoint) {
    super(endpoint);
    this.endpoint = endpoint;
    this.configuration = endpoint.getConfiguration();
    // topic is the remainder path of the endpoint URI, e.g. kafka:myTopic -> "myTopic"
    this.endpointTopic = URISupport.extractRemainderPath(URI.create(endpoint.getEndpointUri()), true);
    this.configPartitionKey = this.configuration.getPartitionKey();
    this.configKey = this.configuration.getKey();
}
@Override
public KafkaEndpoint getEndpoint() {
    // the endpoint handed to the constructor is the same instance super holds
    return endpoint;
}
/**
 * Builds the effective Kafka producer properties: configuration-level defaults,
 * overlaid with endpoint class-properties, plus the resolved bootstrap servers.
 *
 * @return the merged producer {@link Properties}
 */
Properties getProps() {
    final Properties props = configuration.createProducerProperties();
    endpoint.updateClassProperties(props);

    final String brokers = endpoint.getKafkaClientFactory().getBrokers(configuration);
    if (brokers != null) {
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    }
    return props;
}
/**
 * Health-check probe: reports whether the underlying Kafka {@code NetworkClient} has any
 * ready broker connections. Because the network client is not exposed by the public Kafka
 * API, it is reached via reflection; any reflection failure (e.g. a Kafka client version
 * with different internals) is ignored and the producer is reported as ready.
 *
 * @return {@code true} when ready connections exist, or when the state cannot be determined
 */
public boolean isReady() {
    boolean ready = true;
    try {
        if (kafkaProducer instanceof org.apache.kafka.clients.producer.KafkaProducer) {
            // need to use reflection to access the network client which has API to check if the client has ready
            // connections
            org.apache.kafka.clients.producer.KafkaProducer kp
                    = (org.apache.kafka.clients.producer.KafkaProducer) kafkaProducer;
            org.apache.kafka.clients.producer.internals.Sender sender
                    = (org.apache.kafka.clients.producer.internals.Sender) ReflectionHelper
                            .getField(kp.getClass().getDeclaredField("sender"), kp);
            NetworkClient nc
                    = (NetworkClient) ReflectionHelper.getField(sender.getClass().getDeclaredField("client"), sender);
            LOG.trace("Health-Check calling org.apache.kafka.clients.NetworkClient.hasReadyNode");
            ready = nc.hasReadyNodes(System.currentTimeMillis());
        }
    } catch (Exception e) {
        // ignore: reflection into Kafka internals may break between client versions;
        // use parameterized logging instead of string concatenation (SLF4J idiom)
        LOG.debug("Cannot check hasReadyNodes on KafkaProducer client (NetworkClient) due to {}. This exception is ignored.",
                e.getMessage(), e);
    }
    return ready;
}
/**
 * Returns the underlying Kafka client, or {@code null} before {@link #doStart()}
 * when none was injected via {@link #setKafkaProducer(Producer)}.
 */
@SuppressWarnings("rawtypes")
public org.apache.kafka.clients.producer.Producer getKafkaProducer() {
    return this.kafkaProducer;
}
/**
 * To use a custom {@link org.apache.kafka.clients.producer.KafkaProducer} instance.
 * An injected instance is not closed by this producer on stop (the caller owns it).
 *
 * @param kafkaProducer the Kafka client to use instead of creating one internally
 */
@SuppressWarnings("rawtypes")
public void setKafkaProducer(org.apache.kafka.clients.producer.Producer kafkaProducer) {
    this.kafkaProducer = kafkaProducer;
}
/** Returns the worker pool used for asynchronous sends, or {@code null} when not (yet) set. */
public ExecutorService getWorkerPool() {
    return this.workerPool;
}
/**
 * Sets a custom worker pool for asynchronous sends. A pool supplied here is not
 * shut down by this producer (the caller owns its lifecycle).
 *
 * @param workerPool the executor to use for asynchronous callbacks
 */
public void setWorkerPool(ExecutorService workerPool) {
    this.workerPool = workerPool;
}
/**
 * Starts the producer: creates the Kafka client if none was injected, initializes
 * transactions when a transactional.id is configured, prepares the worker pool for
 * asynchronous mode, resolves the client id, and registers the optional health check.
 *
 * @throws Exception if the Kafka client or worker pool cannot be initialized
 */
@Override
@SuppressWarnings("rawtypes")
protected void doStart() throws Exception {
    final Properties props = getProps();
    if (kafkaProducer == null) {
        createProducer(props);
    }

    // init kafka transaction
    transactionId = props.getProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
    if (transactionId != null) {
        kafkaProducer.initTransactions();
    }

    // if we are in asynchronous mode we need a worker pool
    if (!configuration.isSynchronous() && workerPool == null) {
        // If custom worker pool is provided, then use it, else create a new one.
        if (configuration.getWorkerPool() != null) {
            workerPool = configuration.getWorkerPool();
            // caller-supplied pool: its lifecycle is owned by the caller
            shutdownWorkerPool = false;
        } else {
            workerPool = endpoint.createProducerExecutor();
            // we create a thread pool so we should also shut it down
            shutdownWorkerPool = true;
        }
    }

    // init client id which we may need to get from the kafka producer via reflection
    if (clientId == null) {
        // reuse the already-built properties instead of rebuilding them with getProps()
        clientId = props.getProperty(CommonClientConfigs.CLIENT_ID_CONFIG);
        if (clientId == null) {
            try {
                clientId = (String) ReflectionHelper
                        .getField(kafkaProducer.getClass().getDeclaredField("clientId"), kafkaProducer);
            } catch (Exception e) {
                // ignore: clientId is only informational (health-check naming)
                clientId = "";
            }
        }
    }

    // health-check is optional so discover and resolve
    healthCheckRepository = HealthCheckHelper.getHealthCheckRepository(
            endpoint.getCamelContext(),
            "producers",
            WritableHealthCheckRepository.class);
    if (healthCheckRepository != null) {
        producerHealthCheck = new KafkaProducerHealthCheck(this, clientId);
        producerHealthCheck.setEnabled(getEndpoint().getComponent().isHealthCheckProducerEnabled());
        healthCheckRepository.addHealthCheck(producerHealthCheck);
    }
}
/**
 * Stops the producer: unregisters the health check, closes the Kafka client when this
 * producer created it, and gracefully shuts down the worker pool when this producer owns it.
 *
 * @throws Exception if shutdown of any managed resource fails
 */
@Override
protected void doStop() throws Exception {
    // unregister the optional health check first
    if (healthCheckRepository != null && producerHealthCheck != null) {
        healthCheckRepository.removeHealthCheck(producerHealthCheck);
        producerHealthCheck = null;
    }

    // only close the Kafka client when we created it (injected clients are caller-owned)
    if (kafkaProducer != null && closeKafkaProducer) {
        LOG.debug("Closing KafkaProducer: {}", kafkaProducer);
        kafkaProducer.close();
        kafkaProducer = null;
    }

    // only shut down the worker pool when we created it in doStart()
    if (shutdownWorkerPool && workerPool != null) {
        final int shutdownTimeoutMillis = configuration.getShutdownTimeout();
        LOG.debug("Shutting down Kafka producer worker threads with timeout {} millis", shutdownTimeoutMillis);
        endpoint.getCamelContext().getExecutorServiceManager().shutdownGraceful(workerPool, shutdownTimeoutMillis);
        workerPool = null;
    }
}
/**
 * Creates the Kafka client from the given properties via the endpoint's client factory,
 * marking it as owned by this producer so it is closed in {@link #doStop()}.
 *
 * @param props the fully merged producer properties
 */
private void createProducer(Properties props) {
    final ClassLoader previousClassLoader = Thread.currentThread().getContextClassLoader();
    final ClassLoader kafkaClassLoader
            = org.apache.kafka.clients.producer.KafkaProducer.class.getClassLoader();
    try {
        // Kafka uses reflection for loading authentication settings, use its classloader
        Thread.currentThread().setContextClassLoader(kafkaClassLoader);
        LOG.trace("Creating KafkaProducer");
        kafkaProducer = endpoint.getKafkaClientFactory().getProducer(props);
        // we created this client, so doStop() must close it
        closeKafkaProducer = true;
    } finally {
        // always restore the caller's context classloader
        Thread.currentThread().setContextClassLoader(previousClassLoader);
    }
    LOG.debug("Created KafkaProducer: {}", kafkaProducer);
}
protected Iterator>> createRecordIterable(
Exchange exchange, Message message) {
String topic = evaluateTopic(message);
PropagatedHeadersProvider propagatedHeadersProvider
= new PropagatedHeadersProvider(this, configuration, exchange, message);
Object body = message.getBody();
Iterator
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy (source-listing footer residue, commented out)