/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;

import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

import static org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult.EMPTY;

/**
 * <p>
 * Manages the state of topic metadata requests. This manager returns a
 * {@link NetworkClientDelegate.PollResult} when a request is ready to
 * be sent. Specifically, this manager handles the following user API calls:
 * </p>
 * <ul>
 *     <li>listTopics</li>
 *     <li>partitionsFor</li>
 * </ul>
 * <p>
 * The manager checks the state of the {@link TopicMetadataRequestState} before sending a new one to
 * prevent sending it without backing off from previous attempts.
 * Once a request is completed successfully or times out, its corresponding entry is removed.
 * </p>
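 * <p>
 * A minimal usage sketch, assuming a {@link LogContext} and {@link ConsumerConfig} are already
 * available from the surrounding consumer internals (the call that enqueues a metadata request is
 * omitted here, since it is defined further down in this class):
 * </p>
 * <pre>{@code
 * TopicMetadataRequestManager manager = new TopicMetadataRequestManager(logContext, config);
 * // ... enqueue a topic metadata request through the manager and keep the returned future ...
 * // On each background-thread tick, drain whatever requests are ready to be sent:
 * NetworkClientDelegate.PollResult result = manager.poll(System.currentTimeMillis());
 * }</pre>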
*/
public class TopicMetadataRequestManager implements RequestManager {
private final boolean allowAutoTopicCreation;
    private final List<TopicMetadataRequestState> inflightRequests;
private final long retryBackoffMs;
private final long retryBackoffMaxMs;
private final Logger log;
private final LogContext logContext;
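    /**
     * Build the manager from consumer configuration: the retry backoff bounds
     * ({@link ConsumerConfig#RETRY_BACKOFF_MS_CONFIG} and {@link ConsumerConfig#RETRY_BACKOFF_MAX_MS_CONFIG})
     * and whether metadata requests may trigger topic auto-creation
     * ({@link ConsumerConfig#ALLOW_AUTO_CREATE_TOPICS_CONFIG}).
     */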
public TopicMetadataRequestManager(final LogContext context, final ConsumerConfig config) {
logContext = context;
log = logContext.logger(getClass());
inflightRequests = new LinkedList<>();
retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);
retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG);
allowAutoTopicCreation = config.getBoolean(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG);
}
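    /**
     * Determine which topic metadata requests should go out on this poll. Inflight requests whose
     * deadline has passed are expired, then each inflight request is given a chance to produce an
     * {@link NetworkClientDelegate.UnsentRequest}; the resulting requests are returned in a
     * {@link NetworkClientDelegate.PollResult}, or {@link NetworkClientDelegate.PollResult#EMPTY EMPTY}
     * if nothing is ready to be sent.
     */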
@Override
public NetworkClientDelegate.PollResult poll(final long currentTimeMs) {
// Prune any requests which have timed out
        List<TopicMetadataRequestState> expiredRequests = inflightRequests.stream()
.filter(req -> req.isExpired(currentTimeMs))
.collect(Collectors.toList());
expiredRequests.forEach(TopicMetadataRequestState::expire);
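        // Ask each inflight request for an unsent request; entries that are not ready to be
        // (re)sent yet (for example, still backing off from a previous attempt) yield nothing
        // and are retried on a later poll.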
        List<NetworkClientDelegate.UnsentRequest> requests = inflightRequests.stream()
.map(req -> req.send(currentTimeMs))
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toList());
return requests.isEmpty() ? EMPTY : new NetworkClientDelegate.PollResult(0, requests);
}
/**
* Return the future of the metadata request.
*
* @return the future of the metadata request.
*/
public CompletableFuture