/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.RetriableCommitFailedException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.GroupAuthorizationException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.message.OffsetCommitRequestData;
import org.apache.kafka.common.message.OffsetCommitResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.OffsetCommitRequest;
import org.apache.kafka.common.requests.OffsetCommitResponse;
import org.apache.kafka.common.requests.OffsetFetchRequest;
import org.apache.kafka.common.requests.OffsetFetchResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED;
import static org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult.EMPTY;
import static org.apache.kafka.common.protocol.Errors.COORDINATOR_LOAD_IN_PROGRESS;
public class CommitRequestManager implements RequestManager, MemberStateListener {
private final SubscriptionState subscriptions;
private final LogContext logContext;
private final Logger log;
private final Optional<AutoCommitState> autoCommitState;
private final CoordinatorRequestManager coordinatorRequestManager;
private final long retryBackoffMs;
private final String groupId;
private final Optional<String> groupInstanceId;
private final long retryBackoffMaxMs;
// For testing only
private final OptionalDouble jitter;
private final boolean throwOnFetchStableOffsetUnsupported;
final PendingRequests pendingRequests;
private boolean closing = false;
/**
* Latest member ID and epoch received via the {@link #onMemberEpochUpdated(Optional, Optional)},
* to be included in the OffsetFetch and OffsetCommit requests if present. This will have
* the latest values received from the broker, or empty if the member is not part of the
* group anymore.
*/
private final MemberInfo memberInfo;
public CommitRequestManager(
final Time time,
final LogContext logContext,
final SubscriptionState subscriptions,
final ConsumerConfig config,
final CoordinatorRequestManager coordinatorRequestManager,
final String groupId,
final Optional<String> groupInstanceId) {
this(time, logContext, subscriptions, config, coordinatorRequestManager, groupId,
groupInstanceId, config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG),
config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG), OptionalDouble.empty());
}
// Visible for testing
CommitRequestManager(
final Time time,
final LogContext logContext,
final SubscriptionState subscriptions,
final ConsumerConfig config,
final CoordinatorRequestManager coordinatorRequestManager,
final String groupId,
final Optional<String> groupInstanceId,
final long retryBackoffMs,
final long retryBackoffMaxMs,
final OptionalDouble jitter) {
Objects.requireNonNull(coordinatorRequestManager, "Coordinator is needed upon committing offsets");
this.logContext = logContext;
this.log = logContext.logger(getClass());
this.pendingRequests = new PendingRequests();
if (config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) {
final long autoCommitInterval =
Integer.toUnsignedLong(config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
this.autoCommitState = Optional.of(new AutoCommitState(time, autoCommitInterval));
} else {
this.autoCommitState = Optional.empty();
}
this.coordinatorRequestManager = coordinatorRequestManager;
this.groupId = groupId;
this.groupInstanceId = groupInstanceId;
this.subscriptions = subscriptions;
this.retryBackoffMs = retryBackoffMs;
this.retryBackoffMaxMs = retryBackoffMaxMs;
this.jitter = jitter;
this.throwOnFetchStableOffsetUnsupported = config.getBoolean(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED);
this.memberInfo = new MemberInfo();
}
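// Illustrative sketch, not part of this class: the auto-commit and backoff behavior wired up
// above is driven by standard consumer configs. A minimal, hypothetical setup that would
// exercise the auto-commit path (values are examples only):
//
//   Properties props = new Properties();
//   props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");            // hypothetical group id
//   props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");      // creates AutoCommitState
//   props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "5000"); // auto-commit interval
//   props.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, "100");         // initial retry backoff
//   props.put(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG, "1000");    // max retry backoff
//
// With enable.auto.commit=false, autoCommitState stays empty and the maybeAutoCommit* methods
// below return already-completed futures.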
/**
* Poll for the {@link OffsetFetchRequest} and {@link OffsetCommitRequest} requests if there are any. The function
* will also try to auto-commit the offsets if the feature is enabled.
*/
@Override
public NetworkClientDelegate.PollResult poll(final long currentTimeMs) {
// poll only when the coordinator node is known.
if (!coordinatorRequestManager.coordinator().isPresent())
return EMPTY;
if (closing) {
return drainPendingOffsetCommitRequests();
}
maybeAutoCommitAllConsumedAsync();
if (!pendingRequests.hasUnsentRequests())
return EMPTY;
List<NetworkClientDelegate.UnsentRequest> requests = pendingRequests.drain(currentTimeMs);
// min of the remainingBackoffMs of all the requests that are still backing off
final long timeUntilNextPoll = Math.min(
findMinTime(unsentOffsetCommitRequests(), currentTimeMs),
findMinTime(unsentOffsetFetchRequests(), currentTimeMs));
return new NetworkClientDelegate.PollResult(timeUntilNextPoll, requests);
}
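// Illustrative sketch (assumption: this manager is polled from the consumer's background
// thread loop along with the other request managers, and PollResult exposes its unsent
// requests and the time-until-next-poll bound; names below are schematic):
//
//   NetworkClientDelegate.PollResult result = commitRequestManager.poll(time.milliseconds());
//   // hand result's unsent requests to the network layer, and use its timeUntilNextPollMs
//   // as an upper bound on how long the loop may block before polling this manager again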
@Override
public void signalClose() {
closing = true;
}
/**
* Returns the delay for which the application thread can safely wait before it should be responsive
* to results from the request managers. For example, the subscription state can change when heartbeats
* are sent, so blocking for longer than the heartbeat interval might mean the application thread is not
* responsive to changes.
*/
@Override
public long maximumTimeToWait(long currentTimeMs) {
return autoCommitState.map(ac -> ac.remainingMs(currentTimeMs)).orElse(Long.MAX_VALUE);
}
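// Illustrative sketch (hypothetical caller): the application thread can cap how long it
// blocks using this bound so it stays responsive to the next auto-commit deadline:
//
//   long waitMs = Math.min(userPollTimeoutMs, commitRequestManager.maximumTimeToWait(now));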
private static long findMinTime(final Collection<? extends RequestState> requests, final long currentTimeMs) {
return requests.stream()
.mapToLong(request -> request.remainingBackoffMs(currentTimeMs))
.min()
.orElse(Long.MAX_VALUE);
}
/**
* Generate a request to commit offsets if auto-commit is enabled. The request will be
* returned to be sent out on the next call to {@link #poll(long)}. This will only generate a
* request if there is no other commit request already in-flight, and if the commit interval
* has elapsed.
*
* @param offsets Offsets to commit
* @param expirationTimeMs Time until which the request will continue to be retried if it
* fails with a retriable error. If not present, the request will be
* sent but not retried.
* @param checkInterval True if the auto-commit interval expiration should be checked for
* sending a request. If true, the request will be sent only if the
* auto-commit interval has expired. Pass false to
* send the auto-commit request regardless of the interval (ex.
* auto-commit before rebalance).
* @param retryOnStaleEpoch True if the request should be retried in case it fails with
* {@link Errors#STALE_MEMBER_EPOCH}.
* @return Future that will complete when a response is received for the request, or a
* completed future if no request is generated.
*/
private CompletableFuture<Void> maybeAutoCommit(final Map<TopicPartition, OffsetAndMetadata> offsets,
final Optional<Long> expirationTimeMs,
boolean checkInterval,
boolean retryOnStaleEpoch) {
if (!autoCommitEnabled()) {
log.debug("Skipping auto-commit because auto-commit config is not enabled.");
return CompletableFuture.completedFuture(null);
}
AutoCommitState autocommit = autoCommitState.get();
if (checkInterval && !autocommit.shouldAutoCommit()) {
return CompletableFuture.completedFuture(null);
}
CompletableFuture<Void> result = addOffsetCommitRequest(offsets, expirationTimeMs, retryOnStaleEpoch)
.whenComplete(autoCommitCallback(offsets));
autocommit.resetTimer();
autocommit.setInflightCommitStatus(true);
return result;
}
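// Illustrative recap of how the two auto-commit entry points below drive this method
// (simplified; deadlineMs is a hypothetical absolute expiration time in milliseconds):
//
//   // interval-based, from poll(): only sends once the auto-commit interval has elapsed
//   maybeAutoCommit(subscriptions.allConsumed(), Optional.empty(), true, true);
//
//   // "commit now" path (e.g. before a rebalance): sends regardless of the interval and
//   // retries retriable failures until deadlineMs
//   maybeAutoCommit(subscriptions.allConsumed(), Optional.of(deadlineMs), false, retryOnStaleEpoch);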
/**
* If auto-commit is enabled, this will generate a commit offsets request for all assigned
* partitions and their current positions. Note on auto-commit timers: this will reset the
* auto-commit timer to the interval before issuing the async commit, and when the async commit
* completes, it will reset the auto-commit timer with the exponential backoff if the request
* failed with a retriable error.
*
* @return Future that will complete when a response is received for the request, or a
* completed future if no request is generated.
*/
public CompletableFuture<Void> maybeAutoCommitAllConsumedAsync() {
if (!autoCommitEnabled()) {
// Early return to ensure that no action/logging is performed.
return CompletableFuture.completedFuture(null);
}
Map<TopicPartition, OffsetAndMetadata> offsets = subscriptions.allConsumed();
CompletableFuture<Void> result = maybeAutoCommit(offsets, Optional.empty(), true, true);
result.whenComplete((__, error) -> {
if (error != null) {
if (error instanceof RetriableCommitFailedException) {
log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error.", offsets, error);
resetAutoCommitTimer(retryBackoffMs);
} else {
log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, error.getMessage());
}
} else {
log.debug("Completed asynchronous auto-commit of offsets {}", offsets);
}
});
return result;
}
/**
* Commit consumed offsets if auto-commit is enabled. Retry while the timer is not expired,
* until the request succeeds or fails with a fatal error.
*/
public CompletableFuture<Void> maybeAutoCommitAllConsumedNow(
final Optional<Long> expirationTimeMs,
final boolean retryOnStaleEpoch) {
return maybeAutoCommit(subscriptions.allConsumed(), expirationTimeMs, false, retryOnStaleEpoch);
}
private BiConsumer<? super Void, ? super Throwable> autoCommitCallback(final Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets) {
return (response, throwable) -> {
autoCommitState.ifPresent(autoCommitState -> autoCommitState.setInflightCommitStatus(false));
if (throwable == null) {
log.debug("Completed asynchronous auto-commit of offsets {}", allConsumedOffsets);
} else if (throwable instanceof RetriableCommitFailedException) {
log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}",
allConsumedOffsets, throwable.getMessage());
} else {
log.warn("Asynchronous auto-commit of offsets {} failed", allConsumedOffsets, throwable);
}
};
}
/**
* Handles {@link org.apache.kafka.clients.consumer.internals.events.CommitApplicationEvent}. It creates an
* {@link OffsetCommitRequestState} and enqueues it to be sent later.
*/
public CompletableFuture<Void> addOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets,
final Optional<Long> expirationTimeMs,
final boolean retryOnStaleEpoch) {
if (offsets.isEmpty()) {
log.debug("Skipping commit of empty offsets");
return CompletableFuture.completedFuture(null);
}
return pendingRequests.addOffsetCommitRequest(offsets, expirationTimeMs, retryOnStaleEpoch).future;
}
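// Illustrative sketch (assumption: invoked when a commit application event is processed;
// topic, partition and offset values are examples only):
//
//   Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(
//       new TopicPartition("my-topic", 0), new OffsetAndMetadata(42L));
//   CompletableFuture<Void> commitResult =
//       commitRequestManager.addOffsetCommitRequest(offsets, Optional.empty(), false);
//   commitResult.whenComplete((ignored, error) -> { /* surface the outcome to the caller */ });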
/**
* Handles {@link org.apache.kafka.clients.consumer.internals.events.FetchCommittedOffsetsApplicationEvent}. It creates an
* {@link OffsetFetchRequestState} and enqueues it to be sent later.
*/
public CompletableFuture