/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.RetriableCommitFailedException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.GroupAuthorizationException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.message.OffsetCommitRequestData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.OffsetCommitRequest;
import org.apache.kafka.common.requests.OffsetFetchRequest;
import org.apache.kafka.common.requests.OffsetFetchResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;
import org.slf4j.Logger;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
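
/**
 * Manages the consumer group's {@link OffsetCommitRequest} and {@link OffsetFetchRequest} requests:
 * commit and fetch requests are queued as pending requests and drained on {@link #poll(long)} once
 * the group coordinator is known. When auto-commit is enabled, {@link #poll(long)} also commits all
 * consumed offsets on the configured interval.
 */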
public class CommitRequestManager implements RequestManager {
    // TODO: currently in ConsumerConfig but inaccessible in the internal package.
    private static final String THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED = "internal.throw.on.fetch.stable.offset.unsupported";
    // TODO: We will need to refactor the subscriptionState
    private final SubscriptionState subscriptionState;
    private final Logger log;
    private final Optional<AutoCommitState> autoCommitState;
    private final CoordinatorRequestManager coordinatorRequestManager;
    private final GroupState groupState;
    private final long retryBackoffMs;
    private final boolean throwOnFetchStableOffsetUnsupported;
    final PendingRequests pendingRequests;

    public CommitRequestManager(
            final Time time,
            final LogContext logContext,
            final SubscriptionState subscriptionState,
            final ConsumerConfig config,
            final CoordinatorRequestManager coordinatorRequestManager,
            final GroupState groupState) {
        Objects.requireNonNull(coordinatorRequestManager, "Coordinator is needed upon committing offsets");
        this.log = logContext.logger(getClass());
        this.pendingRequests = new PendingRequests();
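        // Auto-commit state is only created when enable.auto.commit is true; the commit interval
        // is taken from auto.commit.interval.ms.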
        if (config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) {
            final long autoCommitInterval =
                    Integer.toUnsignedLong(config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
            this.autoCommitState = Optional.of(new AutoCommitState(time, autoCommitInterval));
        } else {
            this.autoCommitState = Optional.empty();
        }
        this.coordinatorRequestManager = coordinatorRequestManager;
        this.groupState = groupState;
        this.subscriptionState = subscriptionState;
        this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);
        this.throwOnFetchStableOffsetUnsupported = config.getBoolean(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED);
    }

    /**
     * Poll for any pending {@link OffsetFetchRequest} and {@link OffsetCommitRequest} requests. This
     * method also tries to auto-commit the consumed offsets if the feature is enabled.
     */
    @Override
    public NetworkClientDelegate.PollResult poll(final long currentTimeMs) {
        // poll only when the coordinator node is known.
        if (!coordinatorRequestManager.coordinator().isPresent()) {
            return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.emptyList());
        }
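        // Try to auto-commit all consumed positions if auto-commit is enabled and due.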
        maybeAutoCommit(this.subscriptionState.allConsumed());
        if (!pendingRequests.hasUnsentRequests()) {
            return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, Collections.emptyList());
        }
        return new NetworkClientDelegate.PollResult(Long.MAX_VALUE,
                Collections.unmodifiableList(pendingRequests.drain(currentTimeMs)));
    }

    public void maybeAutoCommit(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        if (!autoCommitState.isPresent()) {
            return;
        }
        AutoCommitState autocommit = autoCommitState.get();
        if (!autocommit.canSendAutocommit()) {
            return;
        }
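        // Send the auto-commit, then restart the interval timer and mark that a commit is in flight.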
        sendAutoCommit(offsets);
        autocommit.resetTimer();
        autocommit.setInflightCommitStatus(true);
    }

    /**
     * Handles a {@link org.apache.kafka.clients.consumer.internals.events.CommitApplicationEvent}. It creates an
     * {@link OffsetCommitRequestState} and enqueues it to be sent later.
     */
    public CompletableFuture<ClientResponse> addOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        return pendingRequests.addOffsetCommitRequest(offsets);
    }

    /**
     * Handles an {@link org.apache.kafka.clients.consumer.internals.events.OffsetFetchApplicationEvent}. It creates an
     * {@link OffsetFetchRequestState} and enqueues it to be sent later.
     */
    public CompletableFuture