/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.FetchSessionHandler;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.clients.Metadata;
import org.apache.kafka.clients.NetworkClientUtils;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.internals.IdempotentCloser;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchRequest;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.helpers.MessageFormatter;
import java.io.Closeable;
import java.time.Duration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static org.apache.kafka.clients.consumer.internals.FetchUtils.requestMetadataUpdate;
/**
* {@code AbstractFetch} represents the basic state and logic for record fetch processing.
*/
public abstract class AbstractFetch implements Closeable {
private final Logger log;
private final IdempotentCloser idempotentCloser = new IdempotentCloser();
protected final LogContext logContext;
protected final ConsumerMetadata metadata;
protected final SubscriptionState subscriptions;
protected final FetchConfig fetchConfig;
protected final Time time;
protected final FetchMetricsManager metricsManager;
protected final FetchBuffer fetchBuffer;
protected final BufferSupplier decompressionBufferSupplier;
protected final Set<Integer> nodesWithPendingFetchRequests;
private final Map<Integer, FetchSessionHandler> sessionHandlers;
private final ApiVersions apiVersions;
public AbstractFetch(final LogContext logContext,
final ConsumerMetadata metadata,
final SubscriptionState subscriptions,
final FetchConfig fetchConfig,
final FetchBuffer fetchBuffer,
final FetchMetricsManager metricsManager,
final Time time,
final ApiVersions apiVersions) {
this.log = logContext.logger(AbstractFetch.class);
this.logContext = logContext;
this.metadata = metadata;
this.subscriptions = subscriptions;
this.fetchConfig = fetchConfig;
this.fetchBuffer = fetchBuffer;
this.decompressionBufferSupplier = BufferSupplier.create();
this.sessionHandlers = new HashMap<>();
this.nodesWithPendingFetchRequests = new HashSet<>();
this.metricsManager = metricsManager;
this.time = time;
this.apiVersions = apiVersions;
}
/**
* Check if the node is disconnected and unavailable for immediate reconnection (i.e. if it is in the
* reconnect backoff window following the disconnect).
*
* @param node {@link Node} to check for availability
* @see NetworkClientUtils#isUnavailable(KafkaClient, Node, Time)
*/
protected abstract boolean isUnavailable(Node node);
/**
* Checks for an authentication error on a given node and throws the exception if it exists.
*
* @param node {@link Node} to check for a previous {@link AuthenticationException}; if found it is thrown
* @see NetworkClientUtils#maybeThrowAuthFailure(KafkaClient, Node)
*/
protected abstract void maybeThrowAuthFailure(Node node);
/**
* Return whether we have any completed fetches pending return to the user. This method is thread-safe and
* is visible for testing.
*
* @return true if there are completed fetches, false otherwise
*/
boolean hasCompletedFetches() {
return !fetchBuffer.isEmpty();
}
/**
* Return whether we have any completed fetches that are fetchable. This method is thread-safe.
* @return true if there are completed fetches that can be returned, false otherwise
*/
public boolean hasAvailableFetches() {
return fetchBuffer.hasCompletedFetches(fetch -> subscriptions.isFetchable(fetch.partition));
}
/**
* Implements the core logic for a successful fetch response.
*
* @param fetchTarget {@link Node} from which the fetch data was requested
* @param data {@link FetchSessionHandler.FetchRequestData} that represents the session data
* @param resp {@link ClientResponse} from which the {@link FetchResponse} will be retrieved
*/
protected void handleFetchSuccess(final Node fetchTarget,
final FetchSessionHandler.FetchRequestData data,
final ClientResponse resp) {
try {
final FetchResponse response = (FetchResponse) resp.responseBody();
final FetchSessionHandler handler = sessionHandler(fetchTarget.id());
if (handler == null) {
log.error("Unable to find FetchSessionHandler for node {}. Ignoring fetch response.",
fetchTarget.id());
return;
}
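// Let the session handler validate the response against the current fetch session; if it rejects the
// response (e.g. a session-level error), optionally request a metadata update and drop the response.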
final short requestVersion = resp.requestHeader().apiVersion();
if (!handler.handleResponse(response, requestVersion)) {
if (response.error() == Errors.FETCH_SESSION_TOPIC_ID_ERROR) {
metadata.requestUpdate(false);
}
return;
}
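// Process each partition in the response: pair it with the partition data from the original request and
// enqueue a CompletedFetch so its records can be decompressed and parsed when the buffer is drained.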
final Map<TopicPartition, FetchResponseData.PartitionData> responseData = response.responseData(handler.sessionTopicNames(), requestVersion);
final Set<TopicPartition> partitions = new HashSet<>(responseData.keySet());
final FetchMetricsAggregator metricAggregator = new FetchMetricsAggregator(metricsManager, partitions);
Map<TopicPartition, Metadata.LeaderIdAndEpoch> partitionsWithUpdatedLeaderInfo = new HashMap<>();
for (Map.Entry<TopicPartition, FetchResponseData.PartitionData> entry : responseData.entrySet()) {
TopicPartition partition = entry.getKey();
FetchRequest.PartitionData requestData = data.sessionPartitions().get(partition);
if (requestData == null) {
String message;
if (data.metadata().isFull()) {
message = MessageFormatter.arrayFormat(
"Response for missing full request partition: partition={}; metadata={}",
new Object[]{partition, data.metadata()}).getMessage();
} else {
message = MessageFormatter.arrayFormat(
"Response for missing session request partition: partition={}; metadata={}; toSend={}; toForget={}; toReplace={}",
new Object[]{partition, data.metadata(), data.toSend(), data.toForget(), data.toReplace()}).getMessage();
}
// Received fetch response for missing session partition
throw new IllegalStateException(message);
}
long fetchOffset = requestData.fetchOffset;
FetchResponseData.PartitionData partitionData = entry.getValue();
log.debug("Fetch {} at offset {} for partition {} returned fetch data {}",
fetchConfig.isolationLevel, fetchOffset, partition, partitionData);
Errors partitionError = Errors.forCode(partitionData.errorCode());
if (partitionError == Errors.NOT_LEADER_OR_FOLLOWER || partitionError == Errors.FENCED_LEADER_EPOCH) {
log.debug("For {}, received error {}, with leaderIdAndEpoch {}", partition, partitionError, partitionData.currentLeader());
if (partitionData.currentLeader().leaderId() != -1 && partitionData.currentLeader().leaderEpoch() != -1) {
partitionsWithUpdatedLeaderInfo.put(partition, new Metadata.LeaderIdAndEpoch(
Optional.of(partitionData.currentLeader().leaderId()), Optional.of(partitionData.currentLeader().leaderEpoch())));
}
}
CompletedFetch completedFetch = new CompletedFetch(
logContext,
subscriptions,
decompressionBufferSupplier,
partition,
partitionData,
metricAggregator,
fetchOffset,
requestVersion);
fetchBuffer.add(completedFetch);
}
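// If any partitions reported NOT_LEADER_OR_FOLLOWER or FENCED_LEADER_EPOCH with new leader information,
// propagate that leadership data to the metadata and re-validate positions for the affected partitions.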
if (!partitionsWithUpdatedLeaderInfo.isEmpty()) {
List<Node> leaderNodes = response.data().nodeEndpoints().stream()
.map(e -> new Node(e.nodeId(), e.host(), e.port(), e.rack()))
.filter(e -> !e.equals(Node.noNode()))
.collect(Collectors.toList());
Set<TopicPartition> updatedPartitions = metadata.updatePartitionLeadership(partitionsWithUpdatedLeaderInfo, leaderNodes);
updatedPartitions.forEach(
tp -> {
log.debug("For {}, as the leader was updated, position will be validated.", tp);
subscriptions.maybeValidatePositionForCurrentLeader(apiVersions, tp, metadata.currentLeader(tp));
}
);
}
metricsManager.recordLatency(resp.requestLatencyMs());
} finally {
removePendingFetchRequest(fetchTarget, data.metadata().sessionId());
}
}
/**
* Implements the core logic for a failed fetch response.
*
* @param fetchTarget {@link Node} from which the fetch data was requested
* @param data {@link FetchSessionHandler.FetchRequestData} from request
* @param t {@link Throwable} representing the error that resulted in the failure
*/
protected void handleFetchFailure(final Node fetchTarget,
final FetchSessionHandler.FetchRequestData data,
final Throwable t) {
try {
final FetchSessionHandler handler = sessionHandler(fetchTarget.id());
if (handler != null) {
handler.handleError(t);
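// Fall back to the leader for subsequent fetches by clearing any preferred read replica tracked for
// the partitions in this fetch session.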
handler.sessionTopicPartitions().forEach(subscriptions::clearPreferredReadReplica);
}
} finally {
removePendingFetchRequest(fetchTarget, data.metadata().sessionId());
}
}
protected void handleCloseFetchSessionSuccess(final Node fetchTarget,
final FetchSessionHandler.FetchRequestData data,
final ClientResponse ignored) {
int sessionId = data.metadata().sessionId();
removePendingFetchRequest(fetchTarget, sessionId);
log.debug("Successfully sent a close message for fetch session: {} to node: {}", sessionId, fetchTarget);
}
public void handleCloseFetchSessionFailure(final Node fetchTarget,
final FetchSessionHandler.FetchRequestData data,
final Throwable t) {
int sessionId = data.metadata().sessionId();
removePendingFetchRequest(fetchTarget, sessionId);
log.debug("Unable to send a close message for fetch session: {} to node: {}. " +
"This may result in unnecessary fetch sessions at the broker.", sessionId, fetchTarget, t);
}
private void removePendingFetchRequest(Node fetchTarget, int sessionId) {
log.debug("Removing pending request for fetch session: {} for node: {}", sessionId, fetchTarget);
nodesWithPendingFetchRequests.remove(fetchTarget.id());
}
/**
* Creates a new {@link FetchRequest fetch request} in preparation for sending to the Kafka cluster.
*
* @param fetchTarget {@link Node} from which the fetch data will be requested
* @param requestData {@link FetchSessionHandler.FetchRequestData} that represents the session data
* @return {@link FetchRequest.Builder} that can be submitted to the broker
*/
protected FetchRequest.Builder createFetchRequest(final Node fetchTarget,
final FetchSessionHandler.FetchRequestData requestData) {
// Version 12 is the maximum version that could be used without topic IDs. See FetchRequest.json for schema
// changelog.
final short maxVersion = requestData.canUseTopicIds() ? ApiKeys.FETCH.latestVersion() : (short) 12;
final FetchRequest.Builder request = FetchRequest.Builder
.forConsumer(maxVersion, fetchConfig.maxWaitMs, fetchConfig.minBytes, requestData.toSend())
.isolationLevel(fetchConfig.isolationLevel)
.setMaxBytes(fetchConfig.maxBytes)
.metadata(requestData.metadata())
.removed(requestData.toForget())
.replaced(requestData.toReplace())
.rackId(fetchConfig.clientRackId);
log.debug("Sending {} {} to broker {}", fetchConfig.isolationLevel, requestData, fetchTarget);
// We add the node to the set of nodes with pending fetch requests before adding the
// listener because the future may have been fulfilled on another thread (e.g. during a
// disconnection being handled by the heartbeat thread) which will mean the listener
// will be invoked synchronously.
log.debug("Adding pending request for node {}", fetchTarget);
nodesWithPendingFetchRequests.add(fetchTarget.id());
return request;
}
/**
* Return the set of fetchable partitions, which are the partitions to which we are subscribed,
* excluding any partitions for which we still have buffered data. The idea is that since the user
* has yet to process the data for a partition that has already been fetched, we should not request more data
* until the previously fetched data has been processed.
*
* @return {@link Set} of {@link TopicPartition topic partitions} for which we should fetch data
*/
private Set<TopicPartition> fetchablePartitions() {
// This is the set of partitions we have in our buffer
Set<TopicPartition> buffered = fetchBuffer.bufferedPartitions();
// This is the test that returns true if the partition is *not* buffered
Predicate<TopicPartition> isNotBuffered = tp -> !buffered.contains(tp);
// Return all partitions that are in an otherwise fetchable state *and* for which we don't already have some
// messages sitting in our buffer.
return new HashSet<>(subscriptions.fetchablePartitions(isNotBuffered));
}
/**
* Determine from which replica to read: the preferred replica or the leader. The preferred replica is used
* iff:
*
* <ul>
*     <li>A preferred replica was previously set</li>
*     <li>We're still within the lease time for the preferred replica</li>
*     <li>The replica is still online/available</li>
* </ul>
*
* If any of the above are not met, the leader node is returned.
*
* @param partition {@link TopicPartition} for which we want to fetch data
* @param leaderReplica {@link Node} for the leader of the given partition
* @param currentTimeMs Current time in milliseconds; used to determine if we're within the optional lease window
* @return Replica {@link Node node} from which to request the data
* @see SubscriptionState#preferredReadReplica
* @see SubscriptionState#updatePreferredReadReplica
*/
Node selectReadReplica(final TopicPartition partition, final Node leaderReplica, final long currentTimeMs) {
Optional<Integer> nodeId = subscriptions.preferredReadReplica(partition, currentTimeMs);
if (nodeId.isPresent()) {
Optional<Node> node = nodeId.flatMap(id -> metadata.fetch().nodeIfOnline(partition, id));
if (node.isPresent()) {
return node.get();
} else {
log.trace("Not fetching from {} for partition {} since it is marked offline or is missing from our metadata," +
" using the leader instead.", nodeId, partition);
// Note that this condition may happen due to stale metadata, so we clear preferred replica and
// refresh metadata.
requestMetadataUpdate(metadata, subscriptions, partition);
return leaderReplica;
}
} else {
return leaderReplica;
}
}
protected Map<Node, FetchSessionHandler.FetchRequestData> prepareCloseFetchSessionRequests() {
final Cluster cluster = metadata.fetch();
Map<Node, FetchSessionHandler.Builder> fetchable = new HashMap<>();
sessionHandlers.forEach((fetchTargetNodeId, sessionHandler) -> {
// Notify the session handler of the close; the next fetch request built for this session will carry metadata that closes it.
sessionHandler.notifyClose();
// The fetch target node may no longer be available if it has disconnected. In such cases, we
// skip sending the close request.
final Node fetchTarget = cluster.nodeById(fetchTargetNodeId);
if (fetchTarget == null || isUnavailable(fetchTarget)) {
log.debug("Skip sending close session request to broker {} since it is not reachable", fetchTarget);
return;
}
fetchable.put(fetchTarget, sessionHandler.newBuilder());
});
return fetchable.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().build()));
}
/**
* Create fetch requests for all nodes for which we have assigned partitions
* that have no existing requests in flight.
*/
protected Map<Node, FetchSessionHandler.FetchRequestData> prepareFetchRequests() {
// Update metrics in case there was an assignment change
metricsManager.maybeUpdateAssignment(subscriptions);
Map<Node, FetchSessionHandler.Builder> fetchable = new HashMap<>();
long currentTimeMs = time.milliseconds();
Map<String, Uuid> topicIds = metadata.topicIds();
for (TopicPartition partition : fetchablePartitions()) {
SubscriptionState.FetchPosition position = subscriptions.position(partition);
if (position == null)
throw new IllegalStateException("Missing position for fetchable partition " + partition);
Optional<Node> leaderOpt = position.currentLeader.leader;
if (!leaderOpt.isPresent()) {
log.debug("Requesting metadata update for partition {} since the position {} is missing the current leader node", partition, position);
metadata.requestUpdate(false);
continue;
}
// Use the preferred read replica if set, otherwise the partition's leader
Node node = selectReadReplica(partition, leaderOpt.get(), currentTimeMs);
if (isUnavailable(node)) {
maybeThrowAuthFailure(node);
// If we try to send during the reconnect backoff window, then the request is just
// going to be failed anyway before being sent, so skip sending the request for now
log.trace("Skipping fetch for partition {} because node {} is awaiting reconnect backoff", partition, node);
} else if (nodesWithPendingFetchRequests.contains(node.id())) {
log.trace("Skipping fetch for partition {} because previous request to {} has not been processed", partition, node);
} else {
// if there is a leader and no in-flight requests, issue a new fetch
FetchSessionHandler.Builder builder = fetchable.computeIfAbsent(node, k -> {
FetchSessionHandler fetchSessionHandler = sessionHandlers.computeIfAbsent(node.id(), n -> new FetchSessionHandler(logContext, n));
return fetchSessionHandler.newBuilder();
});
Uuid topicId = topicIds.getOrDefault(partition.topic(), Uuid.ZERO_UUID);
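// Log start offsets are only tracked by follower replicas, so the consumer sends INVALID_LOG_START_OFFSET;
// the trailing Optional.empty() is the last fetched epoch, which is not supplied on this path.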
FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(topicId,
position.offset,
FetchRequest.INVALID_LOG_START_OFFSET,
fetchConfig.fetchSize,
position.currentLeader.epoch,
Optional.empty());
builder.add(partition, partitionData);
log.debug("Added {} fetch request for partition {} at position {} to node {}", fetchConfig.isolationLevel,
partition, position, node);
}
}
return fetchable.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().build()));
}
// Visible for testing
protected FetchSessionHandler sessionHandler(int node) {
return sessionHandlers.get(node);
}
/**
* This method is called by {@link #close(Timer)}, which is guarded by the {@link IdempotentCloser} so that it is
* executed only once, the first time any of the {@link #close()} methods is called. Subclasses can override
* this method without the need for extra synchronization at the instance level.
*
* @param timer Timer to enforce time limit
*/
// Visible for testing
protected void closeInternal(Timer timer) {
// we do not need to re-enable wake-ups since we are closing already
Utils.closeQuietly(fetchBuffer, "fetchBuffer");
Utils.closeQuietly(decompressionBufferSupplier, "decompressionBufferSupplier");
}
public void close(final Timer timer) {
idempotentCloser.close(() -> closeInternal(timer));
}
@Override
public void close() {
close(time.timer(Duration.ZERO));
}
/**
* Defines the contract for handling fetch responses from brokers.
* @param <T> Type of response, usually either {@link ClientResponse} or {@link Throwable}
*/
@FunctionalInterface
protected interface ResponseHandler<T> {
/**
* Handle the response from the given {@link Node target}
*/
void handle(Node target, FetchSessionHandler.FetchRequestData data, T response);
}
}
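/*
* Illustrative usage sketch (assumes a concrete subclass that owns a network client; "client" below is a
* placeholder, not an API defined in this file):
*
*   Map<Node, FetchSessionHandler.FetchRequestData> requests = prepareFetchRequests();
*   requests.forEach((node, data) -> {
*       FetchRequest.Builder builder = createFetchRequest(node, data);
*       // send the built request via the client; on completion, call either
*       // handleFetchSuccess(node, data, clientResponse) or handleFetchFailure(node, data, throwable)
*   });
*
* prepareCloseFetchSessionRequests() is driven the same way during shutdown, paired with
* handleCloseFetchSessionSuccess / handleCloseFetchSessionFailure as completion callbacks.
*/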