/*
 * Copyright 2015-2020 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.kafka.core;

import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextStoppedEvent;
import org.springframework.core.log.LogAccessor;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.KafkaUtils;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TransactionSupport;
import org.springframework.kafka.support.converter.MessageConverter;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.kafka.support.converter.RecordMessageConverter;
import org.springframework.kafka.support.micrometer.MicrometerHolder;
import org.springframework.lang.Nullable;
import org.springframework.messaging.Message;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import org.springframework.util.Assert;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SettableListenableFuture;


/**
 * A template for executing high-level operations. When used with a
 * {@link DefaultKafkaProducerFactory}, the template is thread-safe. The producer factory
 * and {@link org.apache.kafka.clients.producer.KafkaProducer} ensure this; refer to their
 * respective javadocs.
 *
 * @param <K> the key type.
 * @param <V> the value type.
 *
 * @author Marius Bogoevici
 * @author Gary Russell
 * @author Igor Stepanov
 * @author Artem Bilan
 * @author Biju Kunjummen
 * @author Endika Gutiérrez
 */
public class KafkaTemplate<K, V> implements KafkaOperations<K, V>, ApplicationContextAware, BeanNameAware,
		ApplicationListener<ContextStoppedEvent>, DisposableBean {

	protected final LogAccessor logger = new LogAccessor(LogFactory.getLog(this.getClass())); //NOSONAR

	private final ProducerFactory<K, V> producerFactory;

	private final boolean customProducerFactory;

	private final boolean autoFlush;

	private final boolean transactional;

	private final ThreadLocal<Producer<K, V>> producers = new ThreadLocal<>();

	private final Map<String, String> micrometerTags = new HashMap<>();

	private String beanName = "kafkaTemplate";

	private ApplicationContext applicationContext;

	private RecordMessageConverter messageConverter = new MessagingMessageConverter();

	private String defaultTopic;

	private ProducerListener<K, V> producerListener = new LoggingProducerListener<>();

	private String transactionIdPrefix;

	private Duration closeTimeout = ProducerFactoryUtils.DEFAULT_CLOSE_TIMEOUT;

	private boolean allowNonTransactional;

	private volatile boolean micrometerEnabled = true;

	private volatile MicrometerHolder micrometerHolder;

	/**
	 * Create an instance using the supplied producer factory and autoFlush false.
	 * @param producerFactory the producer factory.
	 */
	public KafkaTemplate(ProducerFactory<K, V> producerFactory) {
		this(producerFactory, false);
	}
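
	// Illustrative usage sketch (not part of the original source). It shows one way to build a
	// template from a DefaultKafkaProducerFactory and send a record; the broker address, topic
	// name and String serializers below are assumptions for the example only.
	//
	//   Map<String, Object> props = new HashMap<>();
	//   props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
	//   props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
	//   props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
	//   ProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(props);
	//   KafkaTemplate<String, String> template = new KafkaTemplate<>(pf);
	//   template.setDefaultTopic("example-topic");
	//   template.sendDefault("key", "value"); // returns a ListenableFuture<SendResult<String, String>>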

	/**
	 * Create an instance using the supplied producer factory and properties, with
	 * autoFlush false. If the configOverrides is not null or empty, a new
	 * {@link DefaultKafkaProducerFactory} will be created with merged producer properties
	 * with the overrides being applied after the supplied factory's properties.
	 * @param producerFactory the producer factory.
	 * @param configOverrides producer configuration properties to override.
	 * @since 2.5
	 */
	public KafkaTemplate(ProducerFactory<K, V> producerFactory, @Nullable Map<String, Object> configOverrides) {
		this(producerFactory, false, configOverrides);
	}
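
	// Illustrative sketch (not part of the original source) of the configOverrides variant: reuse an
	// existing factory's configuration but override selected producer properties for this template.
	// The property values shown are assumptions for the example only; 'pf' is the factory from the
	// sketch above.
	//
	//   Map<String, Object> overrides = new HashMap<>();
	//   overrides.put(ProducerConfig.LINGER_MS_CONFIG, 20);
	//   overrides.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
	//   KafkaTemplate<String, String> batchingTemplate = new KafkaTemplate<>(pf, overrides);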

	/**
	 * Create an instance using the supplied producer factory and autoFlush setting.
	 * <p>
	 * Set autoFlush to {@code true} if you have configured the producer's
	 * {@code linger.ms} to a non-default value and wish send operations on this template
	 * to occur immediately, regardless of that setting, or if you wish to block until the
	 * broker has acknowledged receipt according to the producer's {@code acks} property.
	 * @param producerFactory the producer factory.
	 * @param autoFlush true to flush after each send.
	 * @see Producer#flush()
	 */
	public KafkaTemplate(ProducerFactory<K, V> producerFactory, boolean autoFlush) {
		this(producerFactory, autoFlush, null);
	}

	/**
	 * Create an instance using the supplied producer factory and autoFlush setting.
	 * <p>
	 * Set autoFlush to {@code true} if you have configured the producer's
	 * {@code linger.ms} to a non-default value and wish send operations on this template
	 * to occur immediately, regardless of that setting, or if you wish to block until the
	 * broker has acknowledged receipt according to the producer's {@code acks} property.
	 * If the configOverrides is not null or empty, a new
	 * {@link DefaultKafkaProducerFactory} will be created with merged producer properties
	 * with the overrides being applied after the supplied factory's properties.
	 * @param producerFactory the producer factory.
	 * @param autoFlush true to flush after each send.
	 * @param configOverrides producer configuration properties to override.
	 * @since 2.5
	 * @see Producer#flush()
	 */
	public KafkaTemplate(ProducerFactory<K, V> producerFactory, boolean autoFlush,
			@Nullable Map<String, Object> configOverrides) {
		Assert.notNull(producerFactory, "'producerFactory' cannot be null");
		this.autoFlush = autoFlush;
		this.transactional = producerFactory.transactionCapable();
		this.micrometerEnabled = KafkaUtils.MICROMETER_PRESENT;
		this.customProducerFactory = configOverrides != null && configOverrides.size() > 0;
		if (this.customProducerFactory) {
			Map<String, Object> configs = new HashMap<>(producerFactory.getConfigurationProperties());
			configs.putAll(configOverrides);
			DefaultKafkaProducerFactory<K, V> newFactory = new DefaultKafkaProducerFactory<>(configs,
					producerFactory.getKeySerializerSupplier(), producerFactory.getValueSerializerSupplier());
			newFactory.setPhysicalCloseTimeout((int) producerFactory.getPhysicalCloseTimeout().getSeconds());
			newFactory.setProducerPerConsumerPartition(producerFactory.isProducerPerConsumerPartition());
			newFactory.setProducerPerThread(producerFactory.isProducerPerThread());
			this.producerFactory = newFactory;
		}
		else {
			this.producerFactory = producerFactory;
		}
	}

	@Override
	public void setBeanName(String name) {
		this.beanName = name;
	}

	@Override
	public void setApplicationContext(ApplicationContext applicationContext) {
		this.applicationContext = applicationContext;
		if (this.customProducerFactory) {
			((DefaultKafkaProducerFactory<K, V>) this.producerFactory).setApplicationContext(applicationContext);
		}
	}

	/**
	 * The default topic for send methods where a topic is not
	 * provided.
	 * @return the topic.
	 */
	public String getDefaultTopic() {
		return this.defaultTopic;
	}

	/**
	 * Set the default topic for send methods where a topic is not
	 * provided.
	 * @param defaultTopic the topic.
	 */
	public void setDefaultTopic(String defaultTopic) {
		this.defaultTopic = defaultTopic;
	}

	/**
	 * Set a {@link ProducerListener} which will be invoked when Kafka acknowledges
	 * a send operation. By default a {@link LoggingProducerListener} is configured
	 * which logs errors only.
	 * @param producerListener the listener; may be {@code null}.
	 */
	public void setProducerListener(@Nullable ProducerListener<K, V> producerListener) {
		this.producerListener = producerListener;
	}

	/**
	 * Return the message converter.
	 * @return the message converter.
	 */
	public MessageConverter getMessageConverter() {
		return this.messageConverter;
	}

	/**
	 * Set the message converter to use.
	 * @param messageConverter the message converter.
	 */
	public void setMessageConverter(RecordMessageConverter messageConverter) {
		Assert.notNull(messageConverter, "'messageConverter' cannot be null");
		this.messageConverter = messageConverter;
	}

	@Override
	public boolean isTransactional() {
		return this.transactional;
	}

	public String getTransactionIdPrefix() {
		return this.transactionIdPrefix;
	}

	/**
	 * Set a transaction id prefix to override the prefix in the producer factory.
	 * @param transactionIdPrefix the prefix.
	 * @since 2.3
	 */
	public void setTransactionIdPrefix(String transactionIdPrefix) {
		this.transactionIdPrefix = transactionIdPrefix;
	}

	/**
	 * Set the maximum time to wait when closing a producer; default 5 seconds.
	 * @param closeTimeout the close timeout.
	 * @since 2.1.14
	 */
	public void setCloseTimeout(Duration closeTimeout) {
		Assert.notNull(closeTimeout, "'closeTimeout' cannot be null");
		this.closeTimeout = closeTimeout;
	}

	/**
	 * Set to true to allow a non-transactional send when the template is transactional.
	 * @param allowNonTransactional true to allow.
	 * @since 2.4.3
	 */
	public void setAllowNonTransactional(boolean allowNonTransactional) {
		this.allowNonTransactional = allowNonTransactional;
	}

	@Override
	public boolean isAllowNonTransactional() {
		return this.allowNonTransactional;
	}

	/**
	 * Set to false to disable micrometer timers, if micrometer is on the class path.
	 * @param micrometerEnabled false to disable.
	 * @since 2.5
	 */
	public void setMicrometerEnabled(boolean micrometerEnabled) {
		this.micrometerEnabled = micrometerEnabled;
	}

	/**
	 * Set additional tags for the Micrometer listener timers.
	 * @param tags the tags.
	 * @since 2.5
	 */
	public void setMicrometerTags(Map<String, String> tags) {
		if (tags != null) {
			this.micrometerTags.putAll(tags);
		}
	}

	/**
	 * Return the producer factory used by this template.
	 * @return the factory.
	 * @since 2.2.5
	 */
	@Override
	public ProducerFactory<K, V> getProducerFactory() {
		return this.producerFactory;
	}

	/**
	 * Return the producer factory used by this template based on the topic.
	 * The default implementation returns the only producer factory.
	 * @param topic the topic.
	 * @return the factory.
	 * @since 2.5
	 */
	protected ProducerFactory<K, V> getProducerFactory(String topic) {
		return this.producerFactory;
	}

	@Override
	public void onApplicationEvent(ContextStoppedEvent event) {
		if (this.customProducerFactory) {
			((DefaultKafkaProducerFactory<K, V>) this.producerFactory).onApplicationEvent(event);
		}
	}

	@Override
	public ListenableFuture<SendResult<K, V>> sendDefault(@Nullable V data) {
		return send(this.defaultTopic, data);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> sendDefault(K key, @Nullable V data) {
		return send(this.defaultTopic, key, data);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, @Nullable V data) {
		return send(this.defaultTopic, partition, key, data);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, Long timestamp, K key, @Nullable V data) {
		return send(this.defaultTopic, partition, timestamp, key, data);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> send(String topic, @Nullable V data) {
		ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, data);
		return doSend(producerRecord);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> send(String topic, K key, @Nullable V data) {
		ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, key, data);
		return doSend(producerRecord);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, K key, @Nullable V data) {
		ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, partition, key, data);
		return doSend(producerRecord);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, Long timestamp, K key, @Nullable V data) {
		ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, partition, timestamp, key, data);
		return doSend(producerRecord);
	}

	@Override
	public ListenableFuture<SendResult<K, V>> send(ProducerRecord<K, V> record) {
		return doSend(record);
	}

	@SuppressWarnings("unchecked")
	@Override
	public ListenableFuture<SendResult<K, V>> send(Message<?> message) {
		ProducerRecord<?, ?> producerRecord = this.messageConverter.fromMessage(message, this.defaultTopic);
		if (!producerRecord.headers().iterator().hasNext()) { // possibly no Jackson
			byte[] correlationId = message.getHeaders().get(KafkaHeaders.CORRELATION_ID, byte[].class);
			if (correlationId != null) {
				producerRecord.headers().add(KafkaHeaders.CORRELATION_ID, correlationId);
			}
		}
		return doSend((ProducerRecord<K, V>) producerRecord);
	}

	@Override
	public List<PartitionInfo> partitionsFor(String topic) {
		Producer<K, V> producer = getTheProducer();
		try {
			return producer.partitionsFor(topic);
		}
		finally {
			closeProducer(producer, inTransaction());
		}
	}

	@Override
	public Map<MetricName, ? extends Metric> metrics() {
		Producer<K, V> producer = getTheProducer();
		try {
			return producer.metrics();
		}
		finally {
			closeProducer(producer, inTransaction());
		}
	}

	@Override
	public <T> T execute(ProducerCallback<K, V, T> callback) {
		Assert.notNull(callback, "'callback' cannot be null");
		Producer<K, V> producer = getTheProducer();
		try {
			return callback.doInKafka(producer);
		}
		finally {
			closeProducer(producer, inTransaction());
		}
	}

	@Override
	public <T> T executeInTransaction(OperationsCallback<K, V, T> callback) {
		Assert.notNull(callback, "'callback' cannot be null");
		Assert.state(this.transactional, "Producer factory does not support transactions");
		Producer<K, V> producer = this.producers.get();
		Assert.state(producer == null, "Nested calls to 'executeInTransaction' are not allowed");
		String transactionIdSuffix;
		if (this.producerFactory.isProducerPerConsumerPartition()) {
			transactionIdSuffix = TransactionSupport.getTransactionIdSuffix();
			TransactionSupport.clearTransactionIdSuffix();
		}
		else {
			transactionIdSuffix = null;
		}
		producer = this.producerFactory.createProducer(this.transactionIdPrefix);
		try {
			producer.beginTransaction();
		}
		catch (Exception e) {
			closeProducer(producer, false);
			throw e;
		}
		this.producers.set(producer);
		try {
			T result = callback.doInOperations(this);
			try {
				producer.commitTransaction();
			}
			catch (Exception e) {
				throw new SkipAbortException(e);
			}
			return result;
		}
		catch (SkipAbortException e) { // NOSONAR - exception flow control
			throw ((RuntimeException) e.getCause()); // NOSONAR - lost stack trace
		}
		catch (Exception e) {
			producer.abortTransaction();
			throw e;
		}
		finally {
			if (transactionIdSuffix != null) {
				TransactionSupport.setTransactionIdSuffix(transactionIdSuffix);
			}
			this.producers.remove();
			closeProducer(producer, false);
		}
	}

	/**
	 * {@inheritDoc}
	 * <p>
	 * <b>Note</b> It only makes sense to invoke this method if the
	 * {@link ProducerFactory} serves up a singleton producer (such as the
	 * {@link DefaultKafkaProducerFactory}).
	 */
	@Override
	public void flush() {
		Producer<K, V> producer = getTheProducer();
		try {
			producer.flush();
		}
		finally {
			closeProducer(producer, inTransaction());
		}
	}

	@Override
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets) {
		sendOffsetsToTransaction(offsets, KafkaUtils.getConsumerGroupId());
	}

	@Override
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) {
		producerForOffsets().sendOffsetsToTransaction(offsets, consumerGroupId);
	}

	@Override
	public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
			ConsumerGroupMetadata groupMetadata) {
		producerForOffsets().sendOffsetsToTransaction(offsets, groupMetadata);
	}

	private Producer<K, V> producerForOffsets() {
		Producer<K, V> producer = this.producers.get();
		if (producer == null) {
			@SuppressWarnings("unchecked")
			KafkaResourceHolder<K, V> resourceHolder = (KafkaResourceHolder<K, V>) TransactionSynchronizationManager
					.getResource(this.producerFactory);
			Assert.isTrue(resourceHolder != null, "No transaction in process");
			producer = resourceHolder.getProducer();
		}
		return producer;
	}

	protected void closeProducer(Producer<K, V> producer, boolean inTx) {
		if (!inTx) {
			producer.close(this.closeTimeout);
		}
	}

	/**
	 * Send the producer record.
	 * @param producerRecord the producer record.
	 * @return a Future for the {@link org.apache.kafka.clients.producer.RecordMetadata
	 * RecordMetadata}.
	 */
	protected ListenableFuture<SendResult<K, V>> doSend(final ProducerRecord<K, V> producerRecord) {
		final Producer<K, V> producer = getTheProducer(producerRecord.topic());
		this.logger.trace(() -> "Sending: " + producerRecord);
		final SettableListenableFuture<SendResult<K, V>> future = new SettableListenableFuture<>();
		Object sample = null;
		if (this.micrometerEnabled && this.micrometerHolder == null) {
			this.micrometerHolder = obtainMicrometerHolder();
		}
		if (this.micrometerHolder != null) {
			sample = this.micrometerHolder.start();
		}
		Future<RecordMetadata> sendFuture =
				producer.send(producerRecord, buildCallback(producerRecord, producer, future, sample));
		// May be an immediate failure
		if (sendFuture.isDone()) {
			try {
				sendFuture.get();
			}
			catch (InterruptedException e) {
				Thread.currentThread().interrupt();
				throw new KafkaException("Interrupted", e);
			}
			catch (ExecutionException e) {
				throw new KafkaException("Send failed", e.getCause()); // NOSONAR, stack trace
			}
		}
		if (this.autoFlush) {
			flush();
		}
		this.logger.trace(() -> "Sent: " + producerRecord);
		return future;
	}

	private Callback buildCallback(final ProducerRecord<K, V> producerRecord, final Producer<K, V> producer,
			final SettableListenableFuture<SendResult<K, V>> future, Object sample) {
		return (metadata, exception) -> {
			try {
				if (exception == null) {
					if (sample != null) {
						this.micrometerHolder.success(sample);
					}
					future.set(new SendResult<>(producerRecord, metadata));
					if (KafkaTemplate.this.producerListener != null) {
						KafkaTemplate.this.producerListener.onSuccess(producerRecord, metadata);
					}
					KafkaTemplate.this.logger.trace(() -> "Sent ok: " + producerRecord + ", metadata: " + metadata);
				}
				else {
					if (sample != null) {
						this.micrometerHolder.failure(sample, exception.getClass().getSimpleName());
					}
					future.setException(new KafkaProducerException(producerRecord, "Failed to send", exception));
					if (KafkaTemplate.this.producerListener != null) {
						KafkaTemplate.this.producerListener.onError(producerRecord, exception);
					}
					KafkaTemplate.this.logger.debug(exception, () -> "Failed to send: " + producerRecord);
				}
			}
			finally {
				if (!KafkaTemplate.this.transactional) {
					closeProducer(producer, false);
				}
			}
		};
	}

	/**
	 * Return true if the template is currently running in a transaction on the calling
	 * thread.
	 * @return true if a transaction is running.
	 * @since 2.2.1
	 */
	@Override
	public boolean inTransaction() {
		return this.transactional && (this.producers.get() != null
				|| TransactionSynchronizationManager.getResource(this.producerFactory) != null
				|| TransactionSynchronizationManager.isActualTransactionActive());
	}

	private Producer<K, V> getTheProducer() {
		return getTheProducer(null);
	}

	protected Producer<K, V> getTheProducer(@SuppressWarnings("unused") @Nullable String topic) {
		boolean transactionalProducer = this.transactional;
		if (transactionalProducer) {
			boolean inTransaction = inTransaction();
			Assert.state(this.allowNonTransactional || inTransaction,
					"No transaction is in process; "
						+ "possible solutions: run the template operation within the scope of a "
						+ "template.executeInTransaction() operation, start a transaction with @Transactional "
						+ "before invoking the template method, "
						+ "run in a transaction started by a listener container when consuming a record");
			if (!inTransaction) {
				transactionalProducer = false;
			}
		}
		if (transactionalProducer) {
			Producer<K, V> producer = this.producers.get();
			if (producer != null) {
				return producer;
			}
			KafkaResourceHolder<K, V> holder = ProducerFactoryUtils
					.getTransactionalResourceHolder(this.producerFactory, this.transactionIdPrefix, this.closeTimeout);
			return holder.getProducer();
		}
		else if (this.allowNonTransactional) {
			return this.producerFactory.createNonTransactionalProducer();
		}
		else if (topic == null) {
			return this.producerFactory.createProducer();
		}
		else {
			return getProducerFactory(topic).createProducer();
		}
	}

	@Nullable
	private MicrometerHolder obtainMicrometerHolder() {
		MicrometerHolder holder = null;
		try {
			if (KafkaUtils.MICROMETER_PRESENT) {
				holder = new MicrometerHolder(this.applicationContext, this.beanName,
						"spring.kafka.template", "KafkaTemplate Timer", this.micrometerTags);
			}
		}
		catch (@SuppressWarnings("unused") IllegalStateException ex) {
			this.micrometerEnabled = false;
		}
		return holder;
	}

	@Override
	public void destroy() {
		if (this.micrometerHolder != null) {
			this.micrometerHolder.destroy();
		}
		if (this.customProducerFactory) {
			((DefaultKafkaProducerFactory<K, V>) this.producerFactory).destroy();
		}
	}

	@SuppressWarnings("serial")
	private static final class SkipAbortException extends RuntimeException {

		SkipAbortException(Throwable cause) {
			super(cause);
		}

	}

}
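
// Illustrative sketch (not part of the original source) of transactional use via executeInTransaction(),
// as described in the javadoc and assertions above. It assumes the producer factory was configured with
// a transactionIdPrefix; the topic and values are assumptions for the example only.
//
//   template.executeInTransaction(ops -> {
//       ops.send("tx-topic", "key", "value-1");
//       ops.send("tx-topic", "key", "value-2");
//       return null; // both sends commit or abort together
//   });
//
// Outside executeInTransaction(), a transactional template requires a transaction to be in progress
// (for example one started by @Transactional or by a listener container), unless
// setAllowNonTransactional(true) was called.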




