/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.ClientRequest;
import org.apache.kafka.clients.ClientResponse;
import org.apache.kafka.clients.ClientUtils;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.DefaultHostResolver;
import org.apache.kafka.clients.HostResolver;
import org.apache.kafka.clients.KafkaClient;
import org.apache.kafka.clients.LeastLoadedNode;
import org.apache.kafka.clients.MetadataRecoveryStrategy;
import org.apache.kafka.clients.NetworkClient;
import org.apache.kafka.clients.StaleMetadataException;
import org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig;
import org.apache.kafka.clients.admin.DeleteAclsResult.FilterResult;
import org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec.TimestampSpec;
import org.apache.kafka.clients.admin.internals.AbortTransactionHandler;
import org.apache.kafka.clients.admin.internals.AdminApiDriver;
import org.apache.kafka.clients.admin.internals.AdminApiFuture;
import org.apache.kafka.clients.admin.internals.AdminApiFuture.SimpleAdminApiFuture;
import org.apache.kafka.clients.admin.internals.AdminApiHandler;
import org.apache.kafka.clients.admin.internals.AdminBootstrapAddresses;
import org.apache.kafka.clients.admin.internals.AdminMetadataManager;
import org.apache.kafka.clients.admin.internals.AllBrokersStrategy;
import org.apache.kafka.clients.admin.internals.AlterConsumerGroupOffsetsHandler;
import org.apache.kafka.clients.admin.internals.CoordinatorKey;
import org.apache.kafka.clients.admin.internals.DeleteConsumerGroupOffsetsHandler;
import org.apache.kafka.clients.admin.internals.DeleteConsumerGroupsHandler;
import org.apache.kafka.clients.admin.internals.DeleteRecordsHandler;
import org.apache.kafka.clients.admin.internals.DescribeConsumerGroupsHandler;
import org.apache.kafka.clients.admin.internals.DescribeProducersHandler;
import org.apache.kafka.clients.admin.internals.DescribeTransactionsHandler;
import org.apache.kafka.clients.admin.internals.FenceProducersHandler;
import org.apache.kafka.clients.admin.internals.ListConsumerGroupOffsetsHandler;
import org.apache.kafka.clients.admin.internals.ListOffsetsHandler;
import org.apache.kafka.clients.admin.internals.ListTransactionsHandler;
import org.apache.kafka.clients.admin.internals.RemoveMembersFromConsumerGroupHandler;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.GroupType;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.TopicCollection.TopicIdCollection;
import org.apache.kafka.common.TopicCollection.TopicNameCollection;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.KafkaStorageException;
import org.apache.kafka.common.errors.MismatchedEndpointTypeException;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.UnacceptableCredentialException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.common.errors.UnknownTopicIdException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.errors.UnsupportedEndpointTypeException;
import org.apache.kafka.common.errors.UnsupportedSaslMechanismException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDir;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult;
import org.apache.kafka.common.message.AlterUserScramCredentialsRequestData;
import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey;
import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
import org.apache.kafka.common.message.CreateAclsRequestData;
import org.apache.kafka.common.message.CreateAclsRequestData.AclCreation;
import org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData.CreatableRenewers;
import org.apache.kafka.common.message.CreateDelegationTokenResponseData;
import org.apache.kafka.common.message.CreatePartitionsRequestData;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopicCollection;
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicConfigs;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult;
import org.apache.kafka.common.message.DeleteAclsRequestData;
import org.apache.kafka.common.message.DeleteAclsRequestData.DeleteAclsFilter;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl;
import org.apache.kafka.common.message.DeleteTopicsRequestData;
import org.apache.kafka.common.message.DeleteTopicsRequestData.DeleteTopicState;
import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult;
import org.apache.kafka.common.message.DescribeClusterRequestData;
import org.apache.kafka.common.message.DescribeClusterResponseData;
import org.apache.kafka.common.message.DescribeConfigsRequestData;
import org.apache.kafka.common.message.DescribeConfigsResponseData;
import org.apache.kafka.common.message.DescribeLogDirsRequestData;
import org.apache.kafka.common.message.DescribeLogDirsRequestData.DescribableLogDirTopic;
import org.apache.kafka.common.message.DescribeLogDirsResponseData;
import org.apache.kafka.common.message.DescribeQuorumResponseData;
import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData;
import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData.TopicRequest;
import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData;
import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition;
import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopic;
import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData;
import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData.UserName;
import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData;
import org.apache.kafka.common.message.ExpireDelegationTokenRequestData;
import org.apache.kafka.common.message.GetTelemetrySubscriptionsRequestData;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData;
import org.apache.kafka.common.message.ListGroupsRequestData;
import org.apache.kafka.common.message.ListGroupsResponseData;
import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.MetadataRequestData;
import org.apache.kafka.common.message.RenewDelegationTokenRequestData;
import org.apache.kafka.common.message.UnregisterBrokerRequestData;
import org.apache.kafka.common.message.UpdateFeaturesRequestData;
import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResult;
import org.apache.kafka.common.metrics.KafkaMetricsContext;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.MetricsContext;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.AlterClientQuotasRequest;
import org.apache.kafka.common.requests.AlterClientQuotasResponse;
import org.apache.kafka.common.requests.AlterConfigsRequest;
import org.apache.kafka.common.requests.AlterConfigsResponse;
import org.apache.kafka.common.requests.AlterPartitionReassignmentsRequest;
import org.apache.kafka.common.requests.AlterPartitionReassignmentsResponse;
import org.apache.kafka.common.requests.AlterReplicaLogDirsRequest;
import org.apache.kafka.common.requests.AlterReplicaLogDirsResponse;
import org.apache.kafka.common.requests.AlterUserScramCredentialsRequest;
import org.apache.kafka.common.requests.AlterUserScramCredentialsResponse;
import org.apache.kafka.common.requests.ApiError;
import org.apache.kafka.common.requests.ApiVersionsRequest;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.requests.CreateAclsRequest;
import org.apache.kafka.common.requests.CreateAclsResponse;
import org.apache.kafka.common.requests.CreateDelegationTokenRequest;
import org.apache.kafka.common.requests.CreateDelegationTokenResponse;
import org.apache.kafka.common.requests.CreatePartitionsRequest;
import org.apache.kafka.common.requests.CreatePartitionsResponse;
import org.apache.kafka.common.requests.CreateTopicsRequest;
import org.apache.kafka.common.requests.CreateTopicsResponse;
import org.apache.kafka.common.requests.DeleteAclsRequest;
import org.apache.kafka.common.requests.DeleteAclsResponse;
import org.apache.kafka.common.requests.DeleteTopicsRequest;
import org.apache.kafka.common.requests.DeleteTopicsResponse;
import org.apache.kafka.common.requests.DescribeAclsRequest;
import org.apache.kafka.common.requests.DescribeAclsResponse;
import org.apache.kafka.common.requests.DescribeClientQuotasRequest;
import org.apache.kafka.common.requests.DescribeClientQuotasResponse;
import org.apache.kafka.common.requests.DescribeClusterRequest;
import org.apache.kafka.common.requests.DescribeClusterResponse;
import org.apache.kafka.common.requests.DescribeConfigsRequest;
import org.apache.kafka.common.requests.DescribeConfigsResponse;
import org.apache.kafka.common.requests.DescribeDelegationTokenRequest;
import org.apache.kafka.common.requests.DescribeDelegationTokenResponse;
import org.apache.kafka.common.requests.DescribeLogDirsRequest;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import org.apache.kafka.common.requests.DescribeQuorumRequest;
import org.apache.kafka.common.requests.DescribeQuorumRequest.Builder;
import org.apache.kafka.common.requests.DescribeQuorumResponse;
import org.apache.kafka.common.requests.DescribeTopicPartitionsRequest;
import org.apache.kafka.common.requests.DescribeTopicPartitionsResponse;
import org.apache.kafka.common.requests.DescribeUserScramCredentialsRequest;
import org.apache.kafka.common.requests.DescribeUserScramCredentialsResponse;
import org.apache.kafka.common.requests.ElectLeadersRequest;
import org.apache.kafka.common.requests.ElectLeadersResponse;
import org.apache.kafka.common.requests.EndpointType;
import org.apache.kafka.common.requests.ExpireDelegationTokenRequest;
import org.apache.kafka.common.requests.ExpireDelegationTokenResponse;
import org.apache.kafka.common.requests.GetTelemetrySubscriptionsRequest;
import org.apache.kafka.common.requests.GetTelemetrySubscriptionsResponse;
import org.apache.kafka.common.requests.IncrementalAlterConfigsRequest;
import org.apache.kafka.common.requests.IncrementalAlterConfigsResponse;
import org.apache.kafka.common.requests.JoinGroupRequest;
import org.apache.kafka.common.requests.ListClientMetricsResourcesRequest;
import org.apache.kafka.common.requests.ListClientMetricsResourcesResponse;
import org.apache.kafka.common.requests.ListGroupsRequest;
import org.apache.kafka.common.requests.ListGroupsResponse;
import org.apache.kafka.common.requests.ListOffsetsRequest;
import org.apache.kafka.common.requests.ListPartitionReassignmentsRequest;
import org.apache.kafka.common.requests.ListPartitionReassignmentsResponse;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RenewDelegationTokenRequest;
import org.apache.kafka.common.requests.RenewDelegationTokenResponse;
import org.apache.kafka.common.requests.UnregisterBrokerRequest;
import org.apache.kafka.common.requests.UnregisterBrokerResponse;
import org.apache.kafka.common.requests.UpdateFeaturesRequest;
import org.apache.kafka.common.requests.UpdateFeaturesResponse;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.scram.internals.ScramFormatter;
import org.apache.kafka.common.security.token.delegation.DelegationToken;
import org.apache.kafka.common.security.token.delegation.TokenInformation;
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.common.utils.ExponentialBackoff;
import org.apache.kafka.common.utils.KafkaThread;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.kafka.common.internals.Topic.CLUSTER_METADATA_TOPIC_NAME;
import static org.apache.kafka.common.internals.Topic.CLUSTER_METADATA_TOPIC_PARTITION;
import static org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignablePartition;
import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse;
import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse;
import static org.apache.kafka.common.message.ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics;
import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment;
import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment;
import static org.apache.kafka.common.requests.MetadataRequest.convertToMetadataRequestTopic;
import static org.apache.kafka.common.requests.MetadataRequest.convertTopicIdsToMetadataRequestTopic;
import static org.apache.kafka.common.utils.Utils.closeQuietly;
/**
* The default implementation of {@link Admin}. An instance of this class is created by invoking one of the
* {@code create()} methods in {@code AdminClient}. Users should not refer to this class directly.
*
* <p>
* This class is thread-safe.
* </p>
* The API of this class is evolving; see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class KafkaAdminClient extends AdminClient {
/**
* The next integer to use to name a KafkaAdminClient which the user hasn't specified an explicit name for.
*/
private static final AtomicInteger ADMIN_CLIENT_ID_SEQUENCE = new AtomicInteger(1);
/**
* The prefix to use for the JMX metrics for this class
*/
private static final String JMX_PREFIX = "kafka.admin.client";
/**
* An invalid shutdown time which indicates that a shutdown has not yet been performed.
*/
private static final long INVALID_SHUTDOWN_TIME = -1;
/**
* The default reason for a LeaveGroupRequest.
*/
static final String DEFAULT_LEAVE_GROUP_REASON = "member was removed by an admin";
/**
* Thread name prefix for admin client network thread
*/
static final String NETWORK_THREAD_PREFIX = "kafka-admin-client-thread";
private final Logger log;
private final LogContext logContext;
/**
* The default timeout to use for an operation.
*/
private final int defaultApiTimeoutMs;
/**
* The timeout to use for a single request.
*/
private final int requestTimeoutMs;
/**
* The name of this AdminClient instance.
*/
private final String clientId;
/**
* Provides the time.
*/
private final Time time;
/**
* The cluster metadata manager used by the KafkaClient.
*/
private final AdminMetadataManager metadataManager;
/**
* The metrics for this KafkaAdminClient.
*/
final Metrics metrics;
/**
* The network client to use.
*/
private final KafkaClient client;
/**
* The runnable used in the service thread for this admin client.
*/
private final AdminClientRunnable runnable;
/**
* The network service thread for this admin client.
*/
private final Thread thread;
/**
* During a close operation, this is the time at which we will time out all pending operations
* and force the RPC thread to exit. If the admin client is not closing, this holds INVALID_SHUTDOWN_TIME.
*/
private final AtomicLong hardShutdownTimeMs = new AtomicLong(INVALID_SHUTDOWN_TIME);
/**
* A factory which creates TimeoutProcessors for the RPC thread.
*/
private final TimeoutProcessorFactory timeoutProcessorFactory;
private final int maxRetries;
private final long retryBackoffMs;
private final long retryBackoffMaxMs;
private final ExponentialBackoff retryBackoff;
private final boolean clientTelemetryEnabled;
private final MetadataRecoveryStrategy metadataRecoveryStrategy;
/**
* The client instance id used in telemetry requests.
*/
private Uuid clientInstanceId;
/**
* Get or create a list value from a map.
*
* @param map The map to get or create the element from.
* @param key The key.
* @param <K> The key type.
* @param <V> The value type.
* @return The list value.
*/
static <K, V> List<V> getOrCreateListValue(Map<K, List<V>> map, K key) {
return map.computeIfAbsent(key, k -> new LinkedList<>());
}
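// For example, getOrCreateListValue(callsToSend, node).add(call) appends a call to
// the node's list, creating an empty LinkedList on the node's first use.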
/**
* Send an exception to every element in a collection of KafkaFutureImpls.
*
* @param futures The collection of KafkaFutureImpl objects.
* @param exc The exception
* @param <T> The KafkaFutureImpl result type.
*/
private static <T> void completeAllExceptionally(Collection<KafkaFutureImpl<T>> futures, Throwable exc) {
completeAllExceptionally(futures.stream(), exc);
}
/**
* Send an exception to all futures in the provided stream
*
* @param futures The stream of KafkaFutureImpl objects.
* @param exc The exception
* @param <T> The KafkaFutureImpl result type.
*/
private static <T> void completeAllExceptionally(Stream<KafkaFutureImpl<T>> futures, Throwable exc) {
futures.forEach(future -> future.completeExceptionally(exc));
}
/**
* Get the current time remaining before a deadline as an integer.
*
* @param now The current time in milliseconds.
* @param deadlineMs The deadline time in milliseconds.
* @return The time delta in milliseconds.
*/
static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) {
long deltaMs = deadlineMs - now;
if (deltaMs > Integer.MAX_VALUE)
deltaMs = Integer.MAX_VALUE;
else if (deltaMs < Integer.MIN_VALUE)
deltaMs = Integer.MIN_VALUE;
return (int) deltaMs;
}
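// For example, calcTimeoutMsRemainingAsInt(1000, 4000) returns 3000; deltas outside
// the integer range are clamped to Integer.MAX_VALUE or Integer.MIN_VALUE rather
// than overflowing in the cast.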
/**
* Generate the client id based on the configuration.
*
* @param config The configuration
*
* @return The client id
*/
static String generateClientId(AdminClientConfig config) {
String clientId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG);
if (!clientId.isEmpty())
return clientId;
return "adminclient-" + ADMIN_CLIENT_ID_SEQUENCE.getAndIncrement();
}
String getClientId() {
return clientId;
}
/**
* Get the deadline for a particular call.
*
* @param now The current time in milliseconds.
* @param optionTimeoutMs The timeout option given by the user.
*
* @return The deadline in milliseconds.
*/
private long calcDeadlineMs(long now, Integer optionTimeoutMs) {
if (optionTimeoutMs != null)
return now + Math.max(0, optionTimeoutMs);
return now + defaultApiTimeoutMs;
}
/**
* Pretty-print an exception.
*
* @param throwable The exception.
*
* @return A compact human-readable string.
*/
static String prettyPrintException(Throwable throwable) {
if (throwable == null)
return "Null exception.";
if (throwable.getMessage() != null) {
return throwable.getClass().getSimpleName() + ": " + throwable.getMessage();
}
return throwable.getClass().getSimpleName();
}
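// For example, prettyPrintException(new TimeoutException("Call has expired."))
// returns "TimeoutException: Call has expired.".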
static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory) {
return createInternal(config, timeoutProcessorFactory, null);
}
static KafkaAdminClient createInternal(
AdminClientConfig config,
TimeoutProcessorFactory timeoutProcessorFactory,
HostResolver hostResolver
) {
Metrics metrics = null;
NetworkClient networkClient = null;
Time time = Time.SYSTEM;
String clientId = generateClientId(config);
ApiVersions apiVersions = new ApiVersions();
LogContext logContext = createLogContext(clientId);
try {
AdminBootstrapAddresses adminAddresses = AdminBootstrapAddresses.fromConfig(config);
AdminMetadataManager metadataManager = new AdminMetadataManager(logContext,
config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG),
config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG),
adminAddresses.usingBootstrapControllers());
metadataManager.update(Cluster.bootstrap(adminAddresses.addresses()), time.milliseconds());
List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(clientId, config);
Map<String, String> metricTags = Collections.singletonMap("client-id", clientId);
MetricConfig metricConfig = new MetricConfig().samples(config.getInt(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG))
.timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
.recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG)))
.tags(metricTags);
MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX,
config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
metrics = new Metrics(metricConfig, reporters, time, metricsContext);
networkClient = ClientUtils.createNetworkClient(config,
clientId,
metrics,
"admin-client",
logContext,
apiVersions,
time,
1,
(int) TimeUnit.HOURS.toMillis(1),
metadataManager.updater(),
(hostResolver == null) ? new DefaultHostResolver() : hostResolver);
return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient,
timeoutProcessorFactory, logContext);
} catch (Throwable exc) {
closeQuietly(metrics, "Metrics");
closeQuietly(networkClient, "NetworkClient");
throw new KafkaException("Failed to create new KafkaAdminClient", exc);
}
}
// Visible for tests
static KafkaAdminClient createInternal(AdminClientConfig config,
AdminMetadataManager metadataManager,
KafkaClient client,
Time time) {
Metrics metrics = null;
String clientId = generateClientId(config);
try {
metrics = new Metrics(new MetricConfig(), new LinkedList<>(), time);
LogContext logContext = createLogContext(clientId);
return new KafkaAdminClient(config, clientId, time, metadataManager, metrics,
client, null, logContext);
} catch (Throwable exc) {
closeQuietly(metrics, "Metrics");
throw new KafkaException("Failed to create new KafkaAdminClient", exc);
}
}
static LogContext createLogContext(String clientId) {
return new LogContext("[AdminClient clientId=" + clientId + "] ");
}
private KafkaAdminClient(AdminClientConfig config,
String clientId,
Time time,
AdminMetadataManager metadataManager,
Metrics metrics,
KafkaClient client,
TimeoutProcessorFactory timeoutProcessorFactory,
LogContext logContext) {
this.clientId = clientId;
this.log = logContext.logger(KafkaAdminClient.class);
this.logContext = logContext;
this.requestTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG);
this.defaultApiTimeoutMs = configureDefaultApiTimeoutMs(config);
this.time = time;
this.metadataManager = metadataManager;
this.metrics = metrics;
this.client = client;
this.runnable = new AdminClientRunnable();
String threadName = NETWORK_THREAD_PREFIX + " | " + clientId;
this.thread = new KafkaThread(threadName, runnable, true);
this.timeoutProcessorFactory = (timeoutProcessorFactory == null) ?
new TimeoutProcessorFactory() : timeoutProcessorFactory;
this.maxRetries = config.getInt(AdminClientConfig.RETRIES_CONFIG);
this.retryBackoffMs = config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG);
this.retryBackoffMaxMs = config.getLong(AdminClientConfig.RETRY_BACKOFF_MAX_MS_CONFIG);
this.retryBackoff = new ExponentialBackoff(
retryBackoffMs,
CommonClientConfigs.RETRY_BACKOFF_EXP_BASE,
retryBackoffMaxMs,
CommonClientConfigs.RETRY_BACKOFF_JITTER);
this.clientTelemetryEnabled = config.getBoolean(AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG);
this.metadataRecoveryStrategy = MetadataRecoveryStrategy.forName(config.getString(AdminClientConfig.METADATA_RECOVERY_STRATEGY_CONFIG));
config.logUnused();
AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());
log.debug("Kafka admin client initialized");
thread.start();
}
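// Retry timing note: with the default retry.backoff.ms of 100 and
// retry.backoff.max.ms of 1000, the ExponentialBackoff built above yields delays
// of roughly 100, 200, 400, 800, then 1000 ms between attempts, each perturbed
// by up to 20% of random jitter.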
/**
* If a default.api.timeout.ms has been explicitly specified, raise an error if it conflicts with request.timeout.ms.
* If no default.api.timeout.ms has been configured, set its value to the larger of the default and
* request.timeout.ms, and log a warning.
* Otherwise, use the provided values for both configurations.
*
* @param config The configuration
* @return The effective default API timeout in milliseconds.
*/
private int configureDefaultApiTimeoutMs(AdminClientConfig config) {
int requestTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG);
int defaultApiTimeoutMs = config.getInt(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
if (defaultApiTimeoutMs < requestTimeoutMs) {
if (config.originals().containsKey(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)) {
throw new ConfigException("The specified value of " + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG +
" must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + ".");
} else {
log.warn("Overriding the default value for {} ({}) with the explicitly configured request timeout {}",
AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, defaultApiTimeoutMs,
requestTimeoutMs);
return requestTimeoutMs;
}
}
return defaultApiTimeoutMs;
}
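// Two examples of the rule above, assuming the built-in defaults of 60000 ms for
// default.api.timeout.ms and 30000 ms for request.timeout.ms: configuring only
// request.timeout.ms=90000 raises the effective default API timeout to 90000 ms
// (with a warning), while explicitly configuring default.api.timeout.ms=10000
// alongside the default request timeout fails with a ConfigException.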
@Override
public void close(Duration timeout) {
long waitTimeMs = timeout.toMillis();
if (waitTimeMs < 0)
throw new IllegalArgumentException("The timeout cannot be negative.");
waitTimeMs = Math.min(TimeUnit.DAYS.toMillis(365), waitTimeMs); // Limit the timeout to a year.
long now = time.milliseconds();
long newHardShutdownTimeMs = now + waitTimeMs;
long prev = INVALID_SHUTDOWN_TIME;
while (true) {
if (hardShutdownTimeMs.compareAndSet(prev, newHardShutdownTimeMs)) {
if (prev == INVALID_SHUTDOWN_TIME) {
log.debug("Initiating close operation.");
} else {
log.debug("Moving hard shutdown time forward.");
}
client.wakeup(); // Wake the thread, if it is blocked inside poll().
break;
}
prev = hardShutdownTimeMs.get();
if (prev < newHardShutdownTimeMs) {
log.debug("Hard shutdown time is already earlier than requested.");
newHardShutdownTimeMs = prev;
break;
}
}
if (log.isDebugEnabled()) {
long deltaMs = Math.max(0, newHardShutdownTimeMs - time.milliseconds());
log.debug("Waiting for the I/O thread to exit. Hard shutdown in {} ms.", deltaMs);
}
try {
// close() can be called by the AdminClient thread itself when it invokes a callback.
// Joining that thread from within itself would deadlock, so check for that condition.
if (Thread.currentThread() != thread) {
// Wait for the thread to be joined.
thread.join(waitTimeMs);
}
log.debug("Kafka admin client closed.");
} catch (InterruptedException e) {
log.debug("Interrupted while joining I/O thread", e);
Thread.currentThread().interrupt();
}
}
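// A minimal usage sketch (the broker address is illustrative): Admin's no-argument
// close() delegates here with an effectively unlimited timeout, so a
// try-with-resources block waits for outstanding calls to complete.
//
//   Properties props = new Properties();
//   props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
//   try (Admin admin = Admin.create(props)) {
//       admin.describeCluster().nodes().get();
//   } // implicitly calls close(Duration.ofMillis(Long.MAX_VALUE))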
/**
* An interface for providing a node for a call.
*/
private interface NodeProvider {
Node provide();
boolean supportsUseControllers();
}
private class MetadataUpdateNodeIdProvider implements NodeProvider {
@Override
public Node provide() {
LeastLoadedNode leastLoadedNode = client.leastLoadedNode(time.milliseconds());
if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP
&& !leastLoadedNode.hasNodeAvailableOrConnectionReady()) {
metadataManager.rebootstrap(time.milliseconds());
}
return leastLoadedNode.node();
}
@Override
public boolean supportsUseControllers() {
return true;
}
}
private class ConstantNodeIdProvider implements NodeProvider {
private final int nodeId;
private final boolean supportsUseControllers;
ConstantNodeIdProvider(int nodeId, boolean supportsUseControllers) {
this.nodeId = nodeId;
this.supportsUseControllers = supportsUseControllers;
}
ConstantNodeIdProvider(int nodeId) {
this.nodeId = nodeId;
this.supportsUseControllers = false;
}
@Override
public Node provide() {
if (metadataManager.isReady() &&
(metadataManager.nodeById(nodeId) != null)) {
return metadataManager.nodeById(nodeId);
}
// If we can't find the node with the given constant ID, we schedule a
// metadata update and hope it appears. This behavior is useful for avoiding
// flaky behavior in tests when the cluster is starting up and not all nodes
// have appeared.
metadataManager.requestUpdate();
return null;
}
@Override
public boolean supportsUseControllers() {
return supportsUseControllers;
}
}
/**
* Provides the controller node.
*/
private class ControllerNodeProvider implements NodeProvider {
private final boolean supportsUseControllers;
ControllerNodeProvider(boolean supportsUseControllers) {
this.supportsUseControllers = supportsUseControllers;
}
ControllerNodeProvider() {
this.supportsUseControllers = false;
}
@Override
public Node provide() {
if (metadataManager.isReady() &&
(metadataManager.controller() != null)) {
return metadataManager.controller();
}
metadataManager.requestUpdate();
return null;
}
@Override
public boolean supportsUseControllers() {
return supportsUseControllers;
}
}
/**
* Provides the least loaded node.
*/
private class LeastLoadedNodeProvider implements NodeProvider {
@Override
public Node provide() {
if (metadataManager.isReady()) {
// This may return null if all nodes are busy.
// In that case, we will postpone node assignment.
return client.leastLoadedNode(time.milliseconds()).node();
}
metadataManager.requestUpdate();
return null;
}
@Override
public boolean supportsUseControllers() {
return false;
}
}
/**
* Provides the broker with the given constant node ID, or the active kcontroller
* if we're using bootstrap.controllers.
*/
private class ConstantBrokerOrActiveKController implements NodeProvider {
private final int nodeId;
ConstantBrokerOrActiveKController(int nodeId) {
this.nodeId = nodeId;
}
@Override
public Node provide() {
if (metadataManager.isReady()) {
if (metadataManager.usingBootstrapControllers()) {
return metadataManager.controller();
} else if (metadataManager.nodeById(nodeId) != null) {
return metadataManager.nodeById(nodeId);
}
}
metadataManager.requestUpdate();
return null;
}
@Override
public boolean supportsUseControllers() {
return true;
}
}
/**
* Provides the least loaded broker, or the active kcontroller if we're using
* bootstrap.controllers.
*/
private class LeastLoadedBrokerOrActiveKController implements NodeProvider {
@Override
public Node provide() {
if (metadataManager.isReady()) {
if (metadataManager.usingBootstrapControllers()) {
return metadataManager.controller();
} else {
// This may return null if all nodes are busy.
// In that case, we will postpone node assignment.
return client.leastLoadedNode(time.milliseconds()).node();
}
}
metadataManager.requestUpdate();
return null;
}
@Override
public boolean supportsUseControllers() {
return true;
}
}
abstract class Call {
private final boolean internal;
private final String callName;
private final long deadlineMs;
private final NodeProvider nodeProvider;
protected int tries;
private Node curNode = null;
private long nextAllowedTryMs;
Call(boolean internal,
String callName,
long nextAllowedTryMs,
int tries,
long deadlineMs,
NodeProvider nodeProvider
) {
this.internal = internal;
this.callName = callName;
this.nextAllowedTryMs = nextAllowedTryMs;
this.tries = tries;
this.deadlineMs = deadlineMs;
this.nodeProvider = nodeProvider;
}
Call(boolean internal, String callName, long deadlineMs, NodeProvider nodeProvider) {
this(internal, callName, 0, 0, deadlineMs, nodeProvider);
}
Call(String callName, long deadlineMs, NodeProvider nodeProvider) {
this(false, callName, 0, 0, deadlineMs, nodeProvider);
}
Call(String callName, long nextAllowedTryMs, int tries, long deadlineMs, NodeProvider nodeProvider) {
this(false, callName, nextAllowedTryMs, tries, deadlineMs, nodeProvider);
}
protected Node curNode() {
return curNode;
}
/**
* Handle a failure.
*
* Depending on what the exception is and how many times we have already tried, we may choose to
* fail the Call, or retry it. It is important to print the stack traces here in some cases,
* since they are not necessarily preserved in ApiVersionException objects.
*
* @param now The current time in milliseconds.
* @param throwable The failure exception.
*/
final void fail(long now, Throwable throwable) {
if (curNode != null) {
runnable.nodeReadyDeadlines.remove(curNode);
curNode = null;
}
// If the admin client is closing, we can't retry.
if (runnable.closing) {
handleFailure(throwable);
return;
}
// If this is an UnsupportedVersionException that we can retry, do so. Note that a
// protocol downgrade will not count against the total number of retries we get for
// this RPC. That is why 'tries' is not incremented.
if ((throwable instanceof UnsupportedVersionException) &&
handleUnsupportedVersionException((UnsupportedVersionException) throwable)) {
log.debug("{} attempting protocol downgrade and then retry.", this);
runnable.pendingCalls.add(this);
return;
}
nextAllowedTryMs = now + retryBackoff.backoff(tries++);
// If the call has timed out, fail.
if (calcTimeoutMsRemainingAsInt(now, deadlineMs) <= 0) {
handleTimeoutFailure(now, throwable);
return;
}
// If the exception is not retriable, fail.
if (!(throwable instanceof RetriableException)) {
if (log.isDebugEnabled()) {
log.debug("{} failed with non-retriable exception after {} attempt(s)", this, tries,
new Exception(prettyPrintException(throwable)));
}
handleFailure(throwable);
return;
}
// If we are out of retries, fail.
if (tries > maxRetries) {
handleTimeoutFailure(now, throwable);
return;
}
if (log.isDebugEnabled()) {
log.debug("{} failed: {}. Beginning retry #{}",
this, prettyPrintException(throwable), tries);
}
maybeRetry(now, throwable);
}
void maybeRetry(long now, Throwable throwable) {
runnable.pendingCalls.add(this);
}
private void handleTimeoutFailure(long now, Throwable cause) {
if (log.isDebugEnabled()) {
log.debug("{} timed out at {} after {} attempt(s)", this, now, tries,
new Exception(prettyPrintException(cause)));
}
if (cause instanceof TimeoutException) {
handleFailure(cause);
} else {
handleFailure(new TimeoutException(this + " timed out at " + now
+ " after " + tries + " attempt(s)", cause));
}
}
/**
* Create an AbstractRequest.Builder for this Call.
*
* @param timeoutMs The timeout in milliseconds.
*
* @return The AbstractRequest builder.
*/
abstract AbstractRequest.Builder<?> createRequest(int timeoutMs);
/**
* Process the call response.
*
* @param abstractResponse The AbstractResponse.
*
*/
abstract void handleResponse(AbstractResponse abstractResponse);
/**
* Handle a failure. This will only be called if the failure exception was not
* retriable, or if we hit a timeout.
*
* @param throwable The exception.
*/
abstract void handleFailure(Throwable throwable);
/**
* Handle an UnsupportedVersionException.
*
* @param exception The exception.
*
* @return True if the exception can be handled; false otherwise.
*/
boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
return false;
}
@Override
public String toString() {
return "Call(callName=" + callName + ", deadlineMs=" + deadlineMs +
", tries=" + tries + ", nextAllowedTryMs=" + nextAllowedTryMs + ")";
}
public boolean isInternal() {
return internal;
}
}
static class TimeoutProcessorFactory {
TimeoutProcessor create(long now) {
return new TimeoutProcessor(now);
}
}
static class TimeoutProcessor {
/**
* The current time in milliseconds.
*/
private final long now;
/**
* The number of milliseconds until the next timeout.
*/
private int nextTimeoutMs;
/**
* Create a new timeout processor.
*
* @param now The current time in milliseconds since the epoch.
*/
TimeoutProcessor(long now) {
this.now = now;
this.nextTimeoutMs = Integer.MAX_VALUE;
}
/**
* Check for calls which have timed out.
* Timed out calls will be removed and failed.
* The remaining milliseconds until the next timeout will be updated.
*
* @param calls The collection of calls.
* @param msg The message to use for the TimeoutException given to failed calls.
*
* @return The number of calls which were timed out.
*/
int handleTimeouts(Collection<Call> calls, String msg) {
int numTimedOut = 0;
for (Iterator<Call> iter = calls.iterator(); iter.hasNext(); ) {
Call call = iter.next();
int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs);
if (remainingMs < 0) {
call.fail(now, new TimeoutException(msg + " Call: " + call.callName));
iter.remove();
numTimedOut++;
} else {
nextTimeoutMs = Math.min(nextTimeoutMs, remainingMs);
}
}
return numTimedOut;
}
/**
* Check whether a call should be timed out.
* The remaining milliseconds until the next timeout will be updated.
*
* @param call The call.
*
* @return True if the call should be timed out.
*/
boolean callHasExpired(Call call) {
int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs);
if (remainingMs < 0)
return true;
nextTimeoutMs = Math.min(nextTimeoutMs, remainingMs);
return false;
}
int nextTimeoutMs() {
return nextTimeoutMs;
}
}
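// For example, a TimeoutProcessor created at now=5000 and run over calls with
// deadlines 4000 and 9000 fails the first (its remaining time is negative) and
// records 4000 ms for the second, so nextTimeoutMs() can bound the next poll().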
private final class AdminClientRunnable implements Runnable {
/**
* Calls which have not yet been assigned to a node.
* Only accessed from this thread.
*/
private final ArrayList<Call> pendingCalls = new ArrayList<>();
/**
* Maps nodes to calls that we want to send.
* Only accessed from this thread.
*/
private final Map<Node, List<Call>> callsToSend = new HashMap<>();
/**
* Maps node ID strings to calls that have been sent.
* Only accessed from this thread.
*/
private final Map<String, Call> callsInFlight = new HashMap<>();
/**
* Maps correlation IDs to calls that have been sent.
* Only accessed from this thread.
*/
private final Map<Integer, Call> correlationIdToCalls = new HashMap<>();
/**
* Pending calls. Protected by the object monitor.
*/
private final List<Call> newCalls = new LinkedList<>();
/**
* Maps nodes to their readiness deadlines. A node will appear in this
* map if there are callsToSend which are waiting for it to be ready, and there
* are no calls in flight using the node.
*/
private final Map<Node, Long> nodeReadyDeadlines = new HashMap<>();
/**
* Whether the admin client is closing.
*/
private volatile boolean closing = false;
/**
* Time out the elements in the pendingCalls list which are expired.
*
* @param processor The timeout processor.
*/
private void timeoutPendingCalls(TimeoutProcessor processor) {
int numTimedOut = processor.handleTimeouts(pendingCalls, "Timed out waiting for a node assignment.");
if (numTimedOut > 0)
log.debug("Timed out {} pending calls.", numTimedOut);
}
/**
* Time out calls which have been assigned to nodes.
*
* @param processor The timeout processor.
*/
private int timeoutCallsToSend(TimeoutProcessor processor) {
int numTimedOut = 0;
for (List<Call> callList : callsToSend.values()) {
numTimedOut += processor.handleTimeouts(callList,
"Timed out waiting to send the call.");
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) with assigned nodes.", numTimedOut);
return numTimedOut;
}
/**
* Drain all the calls from newCalls into pendingCalls.
*
* This function holds the lock for the minimum amount of time, to avoid blocking
* users of AdminClient who will also take the lock to add new calls.
*/
private synchronized void drainNewCalls() {
transitionToPendingAndClearList(newCalls);
}
/**
* Add some calls to pendingCalls, and then clear the input list.
* Also clears Call#curNode.
*
* @param calls The calls to add.
*/
private void transitionToPendingAndClearList(List<Call> calls) {
for (Call call : calls) {
call.curNode = null;
pendingCalls.add(call);
}
calls.clear();
}
/**
* Choose nodes for the calls in the pendingCalls list.
*
* @param now The current time in milliseconds.
* @return The minimum time until a call is ready to be retried if any of the pending
* calls are backing off after a failure
*/
private long maybeDrainPendingCalls(long now) {
long pollTimeout = Long.MAX_VALUE;
log.trace("Trying to choose nodes for {} at {}", pendingCalls, now);
Iterator<Call> pendingIter = pendingCalls.iterator();
while (pendingIter.hasNext()) {
Call call = pendingIter.next();
// If the call is being retried, await the proper backoff before finding the node
if (now < call.nextAllowedTryMs) {
pollTimeout = Math.min(pollTimeout, call.nextAllowedTryMs - now);
} else if (maybeDrainPendingCall(call, now)) {
pendingIter.remove();
}
}
return pollTimeout;
}
/**
* Check whether a pending call can be assigned a node. Return true if the pending call was either
* transferred to the callsToSend collection or if the call was failed. Return false if it
* should remain pending.
*/
private boolean maybeDrainPendingCall(Call call, long now) {
try {
Node node = call.nodeProvider.provide();
if (node != null) {
log.trace("Assigned {} to node {}", call, node);
call.curNode = node;
getOrCreateListValue(callsToSend, node).add(call);
return true;
} else {
log.trace("Unable to assign {} to a node.", call);
return false;
}
} catch (Throwable t) {
// Handle authentication errors while choosing nodes.
log.debug("Unable to choose node for {}", call, t);
call.fail(now, t);
return true;
}
}
/**
* Send the calls which are ready.
*
* @param now The current time in milliseconds.
* @return The minimum timeout we need for poll().
*/
private long sendEligibleCalls(long now) {
long pollTimeout = Long.MAX_VALUE;
for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Node, List<Call>> entry = iter.next();
List<Call> calls = entry.getValue();
if (calls.isEmpty()) {
iter.remove();
continue;
}
Node node = entry.getKey();
if (callsInFlight.containsKey(node.idString())) {
log.trace("Still waiting for other calls to finish on node {}.", node);
nodeReadyDeadlines.remove(node);
continue;
}
if (!client.ready(node, now)) {
Long deadline = nodeReadyDeadlines.get(node);
if (deadline != null) {
if (now >= deadline) {
log.info("Disconnecting from {} and revoking {} node assignment(s) " +
"because the node is taking too long to become ready.",
node.idString(), calls.size());
transitionToPendingAndClearList(calls);
client.disconnect(node.idString());
nodeReadyDeadlines.remove(node);
iter.remove();
continue;
}
pollTimeout = Math.min(pollTimeout, deadline - now);
} else {
nodeReadyDeadlines.put(node, now + requestTimeoutMs);
}
long nodeTimeout = client.pollDelayMs(node, now);
pollTimeout = Math.min(pollTimeout, nodeTimeout);
log.trace("Client is not ready to send to {}. Must delay {} ms", node, nodeTimeout);
continue;
}
// Subtract the time we spent waiting for the node to become ready from
// the total request time.
int remainingRequestTime;
Long deadlineMs = nodeReadyDeadlines.remove(node);
if (deadlineMs == null) {
remainingRequestTime = requestTimeoutMs;
} else {
remainingRequestTime = calcTimeoutMsRemainingAsInt(now, deadlineMs);
}
while (!calls.isEmpty()) {
Call call = calls.remove(0);
int timeoutMs = Math.min(remainingRequestTime,
calcTimeoutMsRemainingAsInt(now, call.deadlineMs));
AbstractRequest.Builder<?> requestBuilder;
try {
requestBuilder = call.createRequest(timeoutMs);
} catch (Throwable t) {
call.fail(now, new KafkaException(String.format(
"Internal error sending %s to %s.", call.callName, node), t));
continue;
}
ClientRequest clientRequest = client.newClientRequest(node.idString(),
requestBuilder, now, true, timeoutMs, null);
log.debug("Sending {} to {}. correlationId={}, timeoutMs={}",
requestBuilder, node, clientRequest.correlationId(), timeoutMs);
client.send(clientRequest, now);
callsInFlight.put(node.idString(), call);
correlationIdToCalls.put(clientRequest.correlationId(), call);
break;
}
}
return pollTimeout;
}
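// Note that the loop above hands at most one call per node to client.send() on
// each pass: after a send it breaks back to the outer loop, and the callsInFlight
// check skips that node until the response arrives, so requests to a single node
// are never pipelined.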
/**
* Time out expired calls that are in flight.
*
* Calls that are in flight may have been partially or completely sent over the wire. They may
* even be in the process of being processed by the remote server. At the moment, our only option
* to time them out is to close the entire connection.
*
* @param processor The timeout processor.
*/
private void timeoutCallsInFlight(TimeoutProcessor processor) {
int numTimedOut = 0;
for (Map.Entry<String, Call> entry : callsInFlight.entrySet()) {
Call call = entry.getValue();
String nodeId = entry.getKey();
if (processor.callHasExpired(call)) {
log.info("Disconnecting from {} due to timeout while awaiting {}", nodeId, call);
client.disconnect(nodeId);
numTimedOut++;
// We don't remove anything from the callsInFlight data structure. Because the connection
// has been closed, the calls should be returned by the next client#poll(),
// and handled at that point.
}
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) in flight.", numTimedOut);
}
/**
* Handle responses from the server.
*
* @param now The current time in milliseconds.
* @param responses The latest responses from KafkaClient.
*/
private void handleResponses(long now, List<ClientResponse> responses) {
for (ClientResponse response : responses) {
int correlationId = response.requestHeader().correlationId();
Call call = correlationIdToCalls.get(correlationId);
if (call == null) {
// If the server returns information about a correlation ID we didn't use yet,
// an internal server error has occurred. Close the connection and log an error message.
log.error("Internal server error on {}: server returned information about unknown " +
"correlation ID {}, requestHeader = {}", response.destination(), correlationId,
response.requestHeader());
client.disconnect(response.destination());
continue;
}
// Stop tracking this call.
correlationIdToCalls.remove(correlationId);
if (!callsInFlight.remove(response.destination(), call)) {
log.error("Internal server error on {}: ignoring call {} in correlationIdToCall " +
"that did not exist in callsInFlight", response.destination(), call);
continue;
}
// Handle the result of the call. This may involve retrying the call, if we got a
// retriable exception.
if (response.versionMismatch() != null) {
call.fail(now, response.versionMismatch());
} else if (response.wasDisconnected()) {
AuthenticationException authException = client.authenticationException(call.curNode());
if (authException != null) {
call.fail(now, authException);
} else {
call.fail(now, new DisconnectException(String.format(
"Cancelled %s request with correlation id %d due to node %s being disconnected",
call.callName, correlationId, response.destination())));
}
} else {
try {
call.handleResponse(response.responseBody());
if (log.isTraceEnabled())
log.trace("{} got response {}", call, response.responseBody());
} catch (Throwable t) {
if (log.isTraceEnabled())
log.trace("{} handleResponse failed with {}", call, prettyPrintException(t));
call.fail(now, t);
}
}
}
}
/**
* Unassign calls that have not yet been sent based on some predicate. For example, this
* is used to reassign the calls that have been assigned to a disconnected node.
*
* @param shouldUnassign Condition for reassignment. If the predicate is true, then the calls will
* be put back in the pendingCalls collection and they will be reassigned
*/
private void unassignUnsentCalls(Predicate<Node> shouldUnassign) {
for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Node, List<Call>> entry = iter.next();
Node node = entry.getKey();
List<Call> awaitingCalls = entry.getValue();
if (awaitingCalls.isEmpty()) {
iter.remove();
} else if (shouldUnassign.test(node)) {
nodeReadyDeadlines.remove(node);
transitionToPendingAndClearList(awaitingCalls);
iter.remove();
}
}
}
private boolean hasActiveExternalCalls(Collection<Call> calls) {
for (Call call : calls) {
if (!call.isInternal()) {
return true;
}
}
return false;
}
/**
* Return true if there are currently active external calls.
*/
private boolean hasActiveExternalCalls() {
if (hasActiveExternalCalls(pendingCalls)) {
return true;
}
for (List<Call> callList : callsToSend.values()) {
if (hasActiveExternalCalls(callList)) {
return true;
}
}
return hasActiveExternalCalls(correlationIdToCalls.values());
}
private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
if (!hasActiveExternalCalls()) {
log.trace("All work has been completed, and the I/O thread is now exiting.");
return true;
}
if (now >= curHardShutdownTimeMs) {
log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
return true;
}
log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now);
return false;
}
@Override
public void run() {
log.debug("Thread starting");
try {
processRequests();
} finally {
closing = true;
AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
int numTimedOut = 0;
TimeoutProcessor timeoutProcessor = new TimeoutProcessor(Long.MAX_VALUE);
synchronized (this) {
numTimedOut += timeoutProcessor.handleTimeouts(newCalls, "The AdminClient thread has exited.");
}
numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited.");
numTimedOut += timeoutCallsToSend(timeoutProcessor);
numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(),
"The AdminClient thread has exited.");
if (numTimedOut > 0) {
log.info("Timed out {} remaining operation(s) during close.", numTimedOut);
}
closeQuietly(client, "KafkaClient");
closeQuietly(metrics, "Metrics");
log.debug("Exiting AdminClientRunnable thread.");
}
}
private void processRequests() {
long now = time.milliseconds();
while (true) {
// Copy newCalls into pendingCalls.
drainNewCalls();
// Check if the AdminClient thread should shut down.
long curHardShutdownTimeMs = hardShutdownTimeMs.get();
if ((curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) && threadShouldExit(now, curHardShutdownTimeMs))
break;
// Handle timeouts.
TimeoutProcessor timeoutProcessor = timeoutProcessorFactory.create(now);
timeoutPendingCalls(timeoutProcessor);
timeoutCallsToSend(timeoutProcessor);
timeoutCallsInFlight(timeoutProcessor);
long pollTimeout = Math.min(1200000 /* 20 minutes */, timeoutProcessor.nextTimeoutMs());
if (curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) {
pollTimeout = Math.min(pollTimeout, curHardShutdownTimeMs - now);
}
// Choose nodes for our pending calls.
pollTimeout = Math.min(pollTimeout, maybeDrainPendingCalls(now));
long metadataFetchDelayMs = metadataManager.metadataFetchDelayMs(now);
if (metadataFetchDelayMs == 0) {
metadataManager.transitionToUpdatePending(now);
Call metadataCall = makeMetadataCall(now);
// Create a new metadata fetch call and add it to the end of pendingCalls.
// Assign a node for just the new call (we handled the other pending nodes above).
if (!maybeDrainPendingCall(metadataCall, now))
pendingCalls.add(metadataCall);
}
pollTimeout = Math.min(pollTimeout, sendEligibleCalls(now));
if (metadataFetchDelayMs > 0) {
pollTimeout = Math.min(pollTimeout, metadataFetchDelayMs);
}
// Ensure that we use a small poll timeout if there are pending calls which need to be sent
if (!pendingCalls.isEmpty())
pollTimeout = Math.min(pollTimeout, retryBackoffMs);
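// For example: with no call timeouts due (nextTimeoutMs() returned
// Integer.MAX_VALUE), no shutdown scheduled, metadata fresh for another 30000 ms,
// and one pending call backing off for 50 ms, the min() chain above leaves
// pollTimeout at 50 ms.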
// Wait for network responses.
log.trace("Entering KafkaClient#poll(timeout={})", pollTimeout);
List<ClientResponse> responses = client.poll(Math.max(0L, pollTimeout), now);
log.trace("KafkaClient#poll retrieved {} response(s)", responses.size());
// unassign calls to disconnected nodes
unassignUnsentCalls(client::connectionFailed);
// Update the current time and handle the latest responses.
now = time.milliseconds();
handleResponses(now, responses);
}
}
/**
* Queue a call for sending.
*
* If the AdminClient thread has exited, this will fail. Otherwise, it will succeed (even
* if the AdminClient is shutting down). This function should be called when retrying an
* existing call.
*
* @param call The new call object.
* @param now The current time in milliseconds.
*/
void enqueue(Call call, long now) {
if (call.tries > maxRetries) {
log.debug("Max retries {} for {} reached", maxRetries, call);
call.handleTimeoutFailure(time.milliseconds(), new TimeoutException(
"Exceeded maxRetries after " + call.tries + " tries."));
return;
}
if (log.isDebugEnabled()) {
log.debug("Queueing {} with a timeout {} ms from now.", call,
Math.min(requestTimeoutMs, call.deadlineMs - now));
}
boolean accepted = false;
synchronized (this) {
if (!closing) {
newCalls.add(call);
accepted = true;
}
}
if (accepted) {
client.wakeup(); // wake the thread if it is in poll()
} else {
log.debug("The AdminClient thread has exited. Timing out {}.", call);
call.handleTimeoutFailure(time.milliseconds(),
new TimeoutException("The AdminClient thread has exited."));
}
}
/**
* Initiate a new call.
*
* This will fail if the AdminClient is scheduled to shut down.
*
* @param call The new call object.
* @param now The current time in milliseconds.
*/
void call(Call call, long now) {
if (hardShutdownTimeMs.get() != INVALID_SHUTDOWN_TIME) {
log.debug("Cannot accept new call {} when AdminClient is closing.", call);
call.handleFailure(new IllegalStateException("Cannot accept new calls when AdminClient is closing."));
} else if (metadataManager.usingBootstrapControllers() &&
(!call.nodeProvider.supportsUseControllers())) {
call.fail(now, new UnsupportedEndpointTypeException("This Admin API is not " +
"yet supported when communicating directly with the controller quorum."));
} else {
enqueue(call, now);
}
}
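// Usage sketch (illustrative; variable names are hypothetical): fresh work
// enters through call(), which refuses new calls once a hard shutdown is
// scheduled, while retries go straight to enqueue() and are accepted until
// the thread actually exits:
//
//     long now = time.milliseconds();
//     runnable.call(freshCall, now);      // rejected while closing
//     runnable.enqueue(retriedCall, now); // still accepted during shutdown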
/**
* Create a new metadata call.
*/
private Call makeMetadataCall(long now) {
if (metadataManager.usingBootstrapControllers()) {
return makeControllerMetadataCall(now);
} else {
return makeBrokerMetadataCall(now);
}
}
private Call makeControllerMetadataCall(long now) {
// Use DescribeCluster here, as specified by KIP-919.
return new Call(true, "describeCluster", calcDeadlineMs(now, requestTimeoutMs),
new MetadataUpdateNodeIdProvider()) {
@Override
public DescribeClusterRequest.Builder createRequest(int timeoutMs) {
return new DescribeClusterRequest.Builder(new DescribeClusterRequestData()
.setIncludeClusterAuthorizedOperations(false)
.setEndpointType(EndpointType.CONTROLLER.id()));
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse;
Cluster cluster;
try {
cluster = parseDescribeClusterResponse(response.data());
} catch (ApiException e) {
handleFailure(e);
return;
}
long now = time.milliseconds();
metadataManager.update(cluster, now);
// Unassign all unsent requests after a metadata refresh to allow for a new
// destination to be selected from the new metadata
unassignUnsentCalls(node -> true);
}
@Override
boolean handleUnsupportedVersionException(final UnsupportedVersionException e) {
metadataManager.updateFailed(e);
return false;
}
@Override
public void handleFailure(Throwable e) {
metadataManager.updateFailed(e);
}
};
}
private Call makeBrokerMetadataCall(long now) {
// We use MetadataRequest here so that we can continue to support brokers that are too
// old to handle DescribeCluster.
return new Call(true, "fetchMetadata", calcDeadlineMs(now, requestTimeoutMs),
new MetadataUpdateNodeIdProvider()) {
@Override
public MetadataRequest.Builder createRequest(int timeoutMs) {
// Since this only requests node information, it's safe to pass true
// for allowAutoTopicCreation (and it simplifies communication with
// older brokers)
return new MetadataRequest.Builder(new MetadataRequestData()
.setTopics(Collections.emptyList())
.setAllowAutoTopicCreation(true));
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
MetadataResponse response = (MetadataResponse) abstractResponse;
long now = time.milliseconds();
metadataManager.update(response.buildCluster(), now);
// Unassign all unsent requests after a metadata refresh to allow for a new
// destination to be selected from the new metadata
unassignUnsentCalls(node -> true);
}
@Override
boolean handleUnsupportedVersionException(final UnsupportedVersionException e) {
metadataManager.updateFailed(e);
return false;
}
@Override
public void handleFailure(Throwable e) {
metadataManager.updateFailed(e);
}
};
}
}
static Cluster parseDescribeClusterResponse(DescribeClusterResponseData response) {
ApiError apiError = new ApiError(response.errorCode(), response.errorMessage());
if (apiError.isFailure()) {
throw apiError.exception();
}
if (response.endpointType() != EndpointType.CONTROLLER.id()) {
throw new MismatchedEndpointTypeException("Expected response from CONTROLLER " +
"endpoint, but got response from endpoint type " + (int) response.endpointType());
}
List<Node> nodes = new ArrayList<>();
Node controllerNode = null;
for (DescribeClusterResponseData.DescribeClusterBroker node : response.brokers()) {
Node newNode = new Node(node.brokerId(), node.host(), node.port(), node.rack());
nodes.add(newNode);
if (node.brokerId() == response.controllerId()) {
controllerNode = newNode;
}
}
return new Cluster(response.clusterId(),
nodes,
Collections.emptyList(),
Collections.emptySet(),
Collections.emptySet(),
controllerNode);
}
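// Minimal sketch of parseDescribeClusterResponse(), assuming the standard
// generated setters on DescribeClusterResponseData; broker values are made up:
//
//     DescribeClusterResponseData data = new DescribeClusterResponseData()
//         .setClusterId("cluster-1")
//         .setControllerId(0)
//         .setEndpointType(EndpointType.CONTROLLER.id());
//     data.brokers().add(new DescribeClusterResponseData.DescribeClusterBroker()
//         .setBrokerId(0).setHost("localhost").setPort(9093));
//     Cluster cluster = parseDescribeClusterResponse(data);
//     // cluster.controller() is broker 0; a non-CONTROLLER endpoint type
//     // would throw MismatchedEndpointTypeException instead.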
/**
* Returns true if a topic name cannot be represented in an RPC. This function does NOT check
* whether the name is too long, contains invalid characters, etc. It is better to enforce
* those policies on the server, so that they can be changed in the future if needed.
*/
private static boolean topicNameIsUnrepresentable(String topicName) {
return topicName == null || topicName.isEmpty();
}
private static boolean topicIdIsUnrepresentable(Uuid topicId) {
return topicId == null || topicId.equals(Uuid.ZERO_UUID);
}
// for testing
int numPendingCalls() {
return runnable.pendingCalls.size();
}
/**
* Fail futures in the given stream which are not done.
* Used when a response handler expected a result for some entity but no result was present.
*/
private static <K, V> void completeUnrealizedFutures(
Stream<Map.Entry<K, KafkaFutureImpl<V>>> futures,
Function<K, String> messageFormatter) {
futures.filter(entry -> !entry.getValue().isDone()).forEach(entry ->
entry.getValue().completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey()))));
}
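// Usage sketch, mirroring how the response handlers in this class call it:
//
//     completeUnrealizedFutures(futures.entrySet().stream(),
//         topic -> "The controller response did not contain a result for topic " + topic);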
/**
* Fail futures in the given Map which were retried due to exceeding quota. We propagate
* the initial error back to the caller if the request timed out.
*/
private static <K> void maybeCompleteQuotaExceededException(
boolean shouldRetryOnQuotaViolation,
Throwable throwable,
Map<K, KafkaFutureImpl<Void>> futures,
Map<K, ThrottlingQuotaExceededException> quotaExceededExceptions,
int throttleTimeDelta) {
if (shouldRetryOnQuotaViolation && throwable instanceof TimeoutException) {
quotaExceededExceptions.forEach((key, value) -> futures.get(key).completeExceptionally(
new ThrottlingQuotaExceededException(
Math.max(0, value.throttleTimeMs() - throttleTimeDelta),
value.getMessage())));
}
}
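// Worked example (illustrative numbers): a topic is throttled with
// throttleTimeMs = 5000 and retried, and the retry times out 2000 ms after the
// original attempt. With shouldRetryOnQuotaViolation enabled, the caller gets
// a ThrottlingQuotaExceededException carrying max(0, 5000 - 2000) = 3000 ms of
// remaining throttle time instead of a bare TimeoutException.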
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options) {
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
final CreatableTopicCollection topics = new CreatableTopicCollection();
for (NewTopic newTopic : newTopics) {
if (topicNameIsUnrepresentable(newTopic.name())) {
KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
newTopic.name() + "' cannot be represented in a request."));
topicFutures.put(newTopic.name(), future);
} else if (!topicFutures.containsKey(newTopic.name())) {
topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
topics.add(newTopic.convertToCreatableTopic());
}
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreateTopicsCall(options, topicFutures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new CreateTopicsResult(new HashMap<>(topicFutures));
}
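// Caller-side sketch (hypothetical broker address and topic name): this method
// is normally reached through the public Admin interface:
//
//     Properties props = new Properties();
//     props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
//     try (Admin admin = Admin.create(props)) {
//         CreateTopicsResult result = admin.createTopics(
//             Collections.singleton(new NewTopic("example-topic", 3, (short) 1)));
//         result.all().get(); // completes once every per-topic future is realized
//     }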
private Call getCreateTopicsCall(final CreateTopicsOptions options,
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> futures,
final CreatableTopicCollection topics,
final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
final long now,
final long deadline) {
return new Call("createTopics", deadline, new ControllerNodeProvider()) {
@Override
public CreateTopicsRequest.Builder createRequest(int timeoutMs) {
return new CreateTopicsRequest.Builder(
new CreateTopicsRequestData()
.setTopics(topics)
.setTimeoutMs(timeoutMs)
.setValidateOnly(options.shouldValidateOnly()));
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
// Check for controller change
handleNotControllerError(abstractResponse);
// Handle server responses for particular topics.
final CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse;
final CreatableTopicCollection retryTopics = new CreatableTopicCollection();
final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
for (CreatableTopicResult result : response.data().topics()) {
KafkaFutureImpl<TopicMetadataAndConfig> future = futures.get(result.name());
if (future == null) {
log.warn("Server response mentioned unknown topic {}", result.name());
} else {
ApiError error = new ApiError(result.errorCode(), result.errorMessage());
if (error.isFailure()) {
if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
response.throttleTimeMs(), error.messageWithFallback());
if (options.shouldRetryOnQuotaViolation()) {
retryTopics.add(topics.find(result.name()).duplicate());
retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
} else {
future.completeExceptionally(quotaExceededException);
}
} else {
future.completeExceptionally(error.exception());
}
} else {
TopicMetadataAndConfig topicMetadataAndConfig;
if (result.topicConfigErrorCode() != Errors.NONE.code()) {
topicMetadataAndConfig = new TopicMetadataAndConfig(
Errors.forCode(result.topicConfigErrorCode()).exception());
} else if (result.numPartitions() == CreateTopicsResult.UNKNOWN) {
topicMetadataAndConfig = new TopicMetadataAndConfig(new UnsupportedVersionException(
"Topic metadata and configs in CreateTopics response not supported"));
} else {
List<CreatableTopicConfigs> configs = result.configs();
Config topicConfig = new Config(configs.stream()
.map(this::configEntry)
.collect(Collectors.toSet()));
topicMetadataAndConfig = new TopicMetadataAndConfig(result.topicId(), result.numPartitions(),
result.replicationFactor(),
topicConfig);
}
future.complete(topicMetadataAndConfig);
}
}
}
// If there are topics to retry, retry them; complete unrealized futures otherwise.
if (retryTopics.isEmpty()) {
// The server should send back a response for every topic. But do a sanity check anyway.
completeUnrealizedFutures(futures.entrySet().stream(),
topic -> "The controller response did not contain a result for topic " + topic);
} else {
final long now = time.milliseconds();
final Call call = getCreateTopicsCall(options, futures, retryTopics,
retryTopicQuotaExceededExceptions, now, deadline);
runnable.call(call, now);
}
}
private ConfigEntry configEntry(CreatableTopicConfigs config) {
return new ConfigEntry(
config.name(),
config.value(),
configSource(DescribeConfigsResponse.ConfigSource.forId(config.configSource())),
config.isSensitive(),
config.readOnly(),
Collections.emptyList(),
null,
null);
}
@Override
void handleFailure(Throwable throwable) {
// If there were any topic retries due to a quota exceeded exception, we propagate
// the initial error back to the caller if the request timed out.
maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
// Fail all the other remaining futures
completeAllExceptionally(futures.values(), throwable);
}
};
}
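// Outcome summary for a topic the server reports as created (derived from the
// handler above): a non-NONE topicConfigErrorCode surfaces through
// TopicMetadataAndConfig as that error; numPartitions == CreateTopicsResult.UNKNOWN
// means the broker's CreateTopics version does not return topic metadata, which is
// surfaced as UnsupportedVersionException; otherwise the topic id, partition count,
// replication factor, and configs are exposed through the future.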
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics,
final DeleteTopicsOptions options) {
if (topics instanceof TopicIdCollection)
return DeleteTopicsResult.ofTopicIds(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
else if (topics instanceof TopicNameCollection)
return DeleteTopicsResult.ofTopicNames(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
else
throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for deleteTopics.");
}
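// Dispatch sketch (hypothetical values): callers select the branch above via
// the TopicCollection factory methods:
//
//     admin.deleteTopics(TopicCollection.ofTopicNames(Collections.singleton("example-topic")));
//     admin.deleteTopics(TopicCollection.ofTopicIds(Collections.singleton(someTopicUuid)));
//
// Any other TopicCollection subclass fails with IllegalArgumentException.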
private Map<String, KafkaFutureImpl<Void>> handleDeleteTopicsUsingNames(final Collection<String> topicNames,
final DeleteTopicsOptions options) {
final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicNames.size());
final List<String> validTopicNames = new ArrayList<>(topicNames.size());
for (String topicName : topicNames) {
if (topicNameIsUnrepresentable(topicName)) {
KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
topicName + "' cannot be represented in a request."));
topicFutures.put(topicName, future);
} else if (!topicFutures.containsKey(topicName)) {
topicFutures.put(topicName, new KafkaFutureImpl<>());
validTopicNames.add(topicName);
}
}
if (!validTopicNames.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getDeleteTopicsCall(options, topicFutures, validTopicNames,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new HashMap<>(topicFutures);
}
private Map<Uuid, KafkaFutureImpl<Void>> handleDeleteTopicsUsingIds(final Collection<Uuid> topicIds,
final DeleteTopicsOptions options) {
final Map<Uuid, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicIds.size());
final List<Uuid> validTopicIds = new ArrayList<>(topicIds.size());
for (Uuid topicId : topicIds) {
if (topicId.equals(Uuid.ZERO_UUID)) {
KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic ID '" +
topicId + "' cannot be represented in a request."));
topicFutures.put(topicId, future);
} else if (!topicFutures.containsKey(topicId)) {
topicFutures.put(topicId, new KafkaFutureImpl<>());
validTopicIds.add(topicId);
}
}
if (!validTopicIds.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getDeleteTopicsWithIdsCall(options, topicFutures, validTopicIds,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new HashMap<>(topicFutures);
}
private Call getDeleteTopicsCall(final DeleteTopicsOptions options,
final Map<String, KafkaFutureImpl<Void>> futures,
final List<String> topics,
final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
final long now,
final long deadline) {
return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {
@Override
DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
return new DeleteTopicsRequest.Builder(
new DeleteTopicsRequestData()
.setTopicNames(topics)
.setTimeoutMs(timeoutMs));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
// Check for controller change
handleNotControllerError(abstractResponse);
// Handle server responses for particular topics.
final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
final List<String> retryTopics = new ArrayList<>();
final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
for (DeletableTopicResult result : response.data().responses()) {
KafkaFutureImpl<Void> future = futures.get(result.name());
if (future == null) {
log.warn("Server response mentioned unknown topic {}", result.name());
} else {
ApiError error = new ApiError(result.errorCode(), result.errorMessage());
if (error.isFailure()) {
if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
response.throttleTimeMs(), error.messageWithFallback());
if (options.shouldRetryOnQuotaViolation()) {
retryTopics.add(result.name());
retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
} else {
future.completeExceptionally(quotaExceededException);
}
} else {
future.completeExceptionally(error.exception());
}
} else {
future.complete(null);
}
}
}
// If there are topics to retry, retry them; complete unrealized futures otherwise.
if (retryTopics.isEmpty()) {
// The server should send back a response for every topic. But do a sanity check anyway.
completeUnrealizedFutures(futures.entrySet().stream(),
topic -> "The controller response did not contain a result for topic " + topic);
} else {
final long now = time.milliseconds();
final Call call = getDeleteTopicsCall(options, futures, retryTopics,
retryTopicQuotaExceededExceptions, now, deadline);
runnable.call(call, now);
}
}
@Override
void handleFailure(Throwable throwable) {
// If there were any topic retries due to a quota exceeded exception, we propagate
// the initial error back to the caller if the request timed out.
maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
// Fail all the other remaining futures
completeAllExceptionally(futures.values(), throwable);
}
};
}
private Call getDeleteTopicsWithIdsCall(final DeleteTopicsOptions options,
final Map<Uuid, KafkaFutureImpl<Void>> futures,
final List<Uuid> topicIds,
final Map<Uuid, ThrottlingQuotaExceededException> quotaExceededExceptions,
final long now,
final long deadline) {
return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {
@Override
DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
return new DeleteTopicsRequest.Builder(
new DeleteTopicsRequestData()
.setTopics(topicIds.stream().map(
topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList()))
.setTimeoutMs(timeoutMs));
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
// Check for controller change
handleNotControllerError(abstractResponse);
// Handle server responses for particular topics.
final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
final List<Uuid> retryTopics = new ArrayList<>();
final Map<Uuid, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
for (DeletableTopicResult result : response.data().responses()) {
KafkaFutureImpl<Void> future = futures.get(result.topicId());
if (future == null) {
log.warn("Server response mentioned unknown topic ID {}", result.topicId());
} else {
ApiError error = new ApiError(result.errorCode(), result.errorMessage());
if (error.isFailure()) {
if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
response.throttleTimeMs(), error.messageWithFallback());
if (options.shouldRetryOnQuotaViolation()) {
retryTopics.add(result.topicId());
retryTopicQuotaExceededExceptions.put(result.topicId(), quotaExceededException);
} else {
future.completeExceptionally(quotaExceededException);
}
} else {
future.completeExceptionally(error.exception());
}
} else {
future.complete(null);
}
}
}
// If there are topics to retry, retry them; complete unrealized futures otherwise.
if (retryTopics.isEmpty()) {
// The server should send back a response for every topic. But do a sanity check anyway.
completeUnrealizedFutures(futures.entrySet().stream(),
topic -> "The controller response did not contain a result for topic " + topic);
} else {
final long now = time.milliseconds();
final Call call = getDeleteTopicsWithIdsCall(options, futures, retryTopics,
retryTopicQuotaExceededExceptions, now, deadline);
runnable.call(call, now);
}
}
@Override
void handleFailure(Throwable throwable) {
// If there were any topic retries due to a quota exceeded exception, we propagate
// the initial error back to the caller if the request timed out.
maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
// Fail all the other remaining futures
completeAllExceptionally(futures.values(), throwable);
}
};
}
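// Note on the retry flow shared by both delete variants above: topics that fail
// with THROTTLING_QUOTA_EXCEEDED are resubmitted as a new Call against the
// original deadline, so quota retries never extend the caller's overall timeout;
// all other results complete their futures immediately.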
@Override
public ListTopicsResult listTopics(final ListTopicsOptions options) {
final KafkaFutureImpl<Map<String, TopicListing>> topicListingFuture = new KafkaFutureImpl<>();