/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.controller;
import org.apache.kafka.clients.admin.AlterConfigOp.OpType;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.BrokerIdNotRegisteredException;
import org.apache.kafka.common.errors.InvalidPartitionsException;
import org.apache.kafka.common.errors.InvalidReplicaAssignmentException;
import org.apache.kafka.common.errors.InvalidReplicationFactorException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.NoReassignmentInProgressException;
import org.apache.kafka.common.errors.PolicyViolationException;
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.common.errors.UnknownTopicIdException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.internals.Topic;
import org.apache.kafka.common.message.AlterPartitionRequestData;
import org.apache.kafka.common.message.AlterPartitionRequestData.BrokerState;
import org.apache.kafka.common.message.AlterPartitionResponseData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignablePartition;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse;
import org.apache.kafka.common.message.BrokerHeartbeatRequestData;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsAssignment;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic;
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreateableTopicConfigCollection;
import org.apache.kafka.common.message.CreateTopicsResponseData;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult;
import org.apache.kafka.common.message.ElectLeadersRequestData;
import org.apache.kafka.common.message.ElectLeadersRequestData.TopicPartitions;
import org.apache.kafka.common.message.ElectLeadersResponseData;
import org.apache.kafka.common.message.ElectLeadersResponseData.PartitionResult;
import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult;
import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment;
import org.apache.kafka.common.metadata.BrokerRegistrationChangeRecord;
import org.apache.kafka.common.metadata.FenceBrokerRecord;
import org.apache.kafka.common.metadata.PartitionChangeRecord;
import org.apache.kafka.common.metadata.PartitionRecord;
import org.apache.kafka.common.metadata.RemoveTopicRecord;
import org.apache.kafka.common.metadata.TopicRecord;
import org.apache.kafka.common.metadata.UnfenceBrokerRecord;
import org.apache.kafka.common.metadata.UnregisterBrokerRecord;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AlterPartitionRequest;
import org.apache.kafka.common.requests.ApiError;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.metadata.BrokerHeartbeatReply;
import org.apache.kafka.metadata.BrokerRegistration;
import org.apache.kafka.metadata.BrokerRegistrationFencingChange;
import org.apache.kafka.metadata.BrokerRegistrationInControlledShutdownChange;
import org.apache.kafka.metadata.KafkaConfigSchema;
import org.apache.kafka.metadata.LeaderRecoveryState;
import org.apache.kafka.metadata.PartitionRegistration;
import org.apache.kafka.metadata.Replicas;
import org.apache.kafka.metadata.placement.ClusterDescriber;
import org.apache.kafka.metadata.placement.PartitionAssignment;
import org.apache.kafka.metadata.placement.PlacementSpec;
import org.apache.kafka.metadata.placement.TopicAssignment;
import org.apache.kafka.metadata.placement.UsableBroker;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.server.mutable.BoundedList;
import org.apache.kafka.server.policy.CreateTopicPolicy;
import org.apache.kafka.timeline.SnapshotRegistry;
import org.apache.kafka.timeline.TimelineHashMap;
import org.apache.kafka.timeline.TimelineHashSet;
import org.slf4j.Logger;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map.Entry;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
import java.util.function.IntPredicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET;
import static org.apache.kafka.common.config.ConfigResource.Type.TOPIC;
import static org.apache.kafka.common.protocol.Errors.FENCED_LEADER_EPOCH;
import static org.apache.kafka.common.protocol.Errors.INELIGIBLE_REPLICA;
import static org.apache.kafka.common.protocol.Errors.INVALID_REQUEST;
import static org.apache.kafka.common.protocol.Errors.INVALID_UPDATE_VERSION;
import static org.apache.kafka.common.protocol.Errors.NEW_LEADER_ELECTED;
import static org.apache.kafka.common.protocol.Errors.NONE;
import static org.apache.kafka.common.protocol.Errors.NOT_CONTROLLER;
import static org.apache.kafka.common.protocol.Errors.NO_REASSIGNMENT_IN_PROGRESS;
import static org.apache.kafka.common.protocol.Errors.OPERATION_NOT_ATTEMPTED;
import static org.apache.kafka.common.protocol.Errors.TOPIC_AUTHORIZATION_FAILED;
import static org.apache.kafka.common.protocol.Errors.UNKNOWN_TOPIC_ID;
import static org.apache.kafka.common.protocol.Errors.UNKNOWN_TOPIC_OR_PARTITION;
import static org.apache.kafka.controller.PartitionReassignmentReplicas.isReassignmentInProgress;
import static org.apache.kafka.controller.QuorumController.MAX_RECORDS_PER_USER_OP;
import static org.apache.kafka.metadata.LeaderConstants.NO_LEADER;
import static org.apache.kafka.metadata.LeaderConstants.NO_LEADER_CHANGE;
/**
* The ReplicationControlManager is the part of the controller which deals with topics
* and partitions. It is responsible for managing the in-sync replica set and leader
* of each partition, as well as administrative tasks like creating or deleting topics.
*/
public class ReplicationControlManager {
static final int MAX_ELECTIONS_PER_IMBALANCE = 1_000;
static class Builder {
private SnapshotRegistry snapshotRegistry = null;
private LogContext logContext = null;
private short defaultReplicationFactor = (short) 3;
private int defaultNumPartitions = 1;
private int maxElectionsPerImbalance = MAX_ELECTIONS_PER_IMBALANCE;
private ConfigurationControlManager configurationControl = null;
private ClusterControlManager clusterControl = null;
private Optional<CreateTopicPolicy> createTopicPolicy = Optional.empty();
private FeatureControlManager featureControl = null;
Builder setSnapshotRegistry(SnapshotRegistry snapshotRegistry) {
this.snapshotRegistry = snapshotRegistry;
return this;
}
Builder setLogContext(LogContext logContext) {
this.logContext = logContext;
return this;
}
Builder setDefaultReplicationFactor(short defaultReplicationFactor) {
this.defaultReplicationFactor = defaultReplicationFactor;
return this;
}
Builder setDefaultNumPartitions(int defaultNumPartitions) {
this.defaultNumPartitions = defaultNumPartitions;
return this;
}
Builder setMaxElectionsPerImbalance(int maxElectionsPerImbalance) {
this.maxElectionsPerImbalance = maxElectionsPerImbalance;
return this;
}
Builder setConfigurationControl(ConfigurationControlManager configurationControl) {
this.configurationControl = configurationControl;
return this;
}
Builder setClusterControl(ClusterControlManager clusterControl) {
this.clusterControl = clusterControl;
return this;
}
Builder setCreateTopicPolicy(Optional<CreateTopicPolicy> createTopicPolicy) {
this.createTopicPolicy = createTopicPolicy;
return this;
}
public Builder setFeatureControl(FeatureControlManager featureControl) {
this.featureControl = featureControl;
return this;
}
ReplicationControlManager build() {
if (configurationControl == null) {
throw new IllegalStateException("Configuration control must be set before building");
} else if (clusterControl == null) {
throw new IllegalStateException("Cluster control must be set before building");
}
if (logContext == null) logContext = new LogContext();
if (snapshotRegistry == null) snapshotRegistry = configurationControl.snapshotRegistry();
if (featureControl == null) {
throw new IllegalStateException("FeatureControlManager must not be null");
}
return new ReplicationControlManager(snapshotRegistry,
logContext,
defaultReplicationFactor,
defaultNumPartitions,
maxElectionsPerImbalance,
configurationControl,
clusterControl,
createTopicPolicy,
featureControl);
}
}
class KRaftClusterDescriber implements ClusterDescriber {
@Override
public Iterator<UsableBroker> usableBrokers() {
return clusterControl.usableBrokers();
}
}
static class TopicControlInfo {
private final String name;
private final Uuid id;
private final TimelineHashMap<Integer, PartitionRegistration> parts;
TopicControlInfo(String name, SnapshotRegistry snapshotRegistry, Uuid id) {
this.name = name;
this.id = id;
this.parts = new TimelineHashMap<>(snapshotRegistry, 0);
}
public String name() {
return name;
}
public Uuid topicId() {
return id;
}
public int numPartitions(long epoch) {
return parts.size(epoch);
}
}
/**
* Translate a CreateableTopicConfigCollection to a map from string to string.
*/
static Map<String, String> translateCreationConfigs(CreateableTopicConfigCollection collection) {
HashMap<String, String> result = new HashMap<>();
collection.forEach(config -> result.put(config.name(), config.value()));
return Collections.unmodifiableMap(result);
}
private final SnapshotRegistry snapshotRegistry;
private final Logger log;
/**
* The KIP-464 default replication factor that is used if a CreateTopics request does
* not specify one.
*/
private final short defaultReplicationFactor;
/**
* The KIP-464 default number of partitions that is used if a CreateTopics request does
* not specify a number of partitions.
*/
private final int defaultNumPartitions;
/**
* Maximum number of leader elections to perform during one partition leader balancing operation.
*/
private final int maxElectionsPerImbalance;
/**
* A reference to the controller's configuration control manager.
*/
private final ConfigurationControlManager configurationControl;
/**
* A reference to the controller's cluster control manager.
*/
private final ClusterControlManager clusterControl;
/**
* The policy to use to validate that topic assignments are valid, if one is present.
*/
private final Optional<CreateTopicPolicy> createTopicPolicy;
/**
* The feature control manager.
*/
private final FeatureControlManager featureControl;
/**
* Maps topic names to topic UUIDs.
*/
private final TimelineHashMap<String, Uuid> topicsByName;
/**
* We try to prevent topics from being created if their names would collide with
* existing topics when periods in the topic name are replaced with underscores.
* The reason for this is that some per-topic metrics do replace periods with
* underscores, and would therefore be ambiguous otherwise.
*
* This map is from normalized topic name to a set of topic names. So if we had two
* topics named foo.bar and foo_bar this map would contain
* a mapping from foo_bar to a set containing foo.bar and foo_bar.
*
* Since we reject topic creations that would collide, under normal conditions the
* sets in this map should only have a size of 1. However, if the cluster was
* upgraded from a version prior to KAFKA-13743, it may be possible to have more
* values here, since colliding topic names will be "grandfathered in."
*/
private final TimelineHashMap<String, TimelineHashSet<String>> topicsWithCollisionChars;
/**
* Maps topic UUIDs to structures containing topic information, including partitions.
*/
private final TimelineHashMap<Uuid, TopicControlInfo> topics;
/**
* A map of broker IDs to the partitions that the broker is in the ISR for.
*/
private final BrokersToIsrs brokersToIsrs;
/**
* A map from topic IDs to the partitions in the topic which are reassigning.
*/
private final TimelineHashMap<Uuid, int[]> reassigningTopics;
/**
* The set of topic partitions for which the leader is not the preferred leader.
*/
private final TimelineHashSet<TopicIdPartition> imbalancedPartitions;
/**
* A ClusterDescriber which supplies cluster information to our ReplicaPlacer.
*/
final KRaftClusterDescriber clusterDescriber = new KRaftClusterDescriber();
private ReplicationControlManager(
SnapshotRegistry snapshotRegistry,
LogContext logContext,
short defaultReplicationFactor,
int defaultNumPartitions,
int maxElectionsPerImbalance,
ConfigurationControlManager configurationControl,
ClusterControlManager clusterControl,
Optional<CreateTopicPolicy> createTopicPolicy,
FeatureControlManager featureControl
) {
this.snapshotRegistry = snapshotRegistry;
this.log = logContext.logger(ReplicationControlManager.class);
this.defaultReplicationFactor = defaultReplicationFactor;
this.defaultNumPartitions = defaultNumPartitions;
this.maxElectionsPerImbalance = maxElectionsPerImbalance;
this.configurationControl = configurationControl;
this.createTopicPolicy = createTopicPolicy;
this.featureControl = featureControl;
this.clusterControl = clusterControl;
this.topicsByName = new TimelineHashMap<>(snapshotRegistry, 0);
this.topicsWithCollisionChars = new TimelineHashMap<>(snapshotRegistry, 0);
this.topics = new TimelineHashMap<>(snapshotRegistry, 0);
this.brokersToIsrs = new BrokersToIsrs(snapshotRegistry);
this.reassigningTopics = new TimelineHashMap<>(snapshotRegistry, 0);
this.imbalancedPartitions = new TimelineHashSet<>(snapshotRegistry, 0);
}
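/**
 * Apply a TopicRecord to the in-memory state: register the topic by name and ID,
 * and track it in the collision-character map if its name contains '.' or '_'.
 */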
public void replay(TopicRecord record) {
topicsByName.put(record.name(), record.topicId());
if (Topic.hasCollisionChars(record.name())) {
String normalizedName = Topic.unifyCollisionChars(record.name());
TimelineHashSet<String> topicNames = topicsWithCollisionChars.get(normalizedName);
if (topicNames == null) {
topicNames = new TimelineHashSet<>(snapshotRegistry, 1);
topicsWithCollisionChars.put(normalizedName, topicNames);
}
topicNames.add(record.name());
}
topics.put(record.topicId(),
new TopicControlInfo(record.name(), snapshotRegistry, record.topicId()));
log.info("Created topic {} with topic ID {}.", record.name(), record.topicId());
}
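/**
 * Apply a PartitionRecord, creating the partition if it is new or replacing the
 * previous registration, and updating the ISR, reassignment, and imbalance tracking.
 */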
public void replay(PartitionRecord record) {
TopicControlInfo topicInfo = topics.get(record.topicId());
if (topicInfo == null) {
throw new RuntimeException("Tried to create partition " + record.topicId() +
":" + record.partitionId() + ", but no topic with that ID was found.");
}
PartitionRegistration newPartInfo = new PartitionRegistration(record);
PartitionRegistration prevPartInfo = topicInfo.parts.get(record.partitionId());
String description = topicInfo.name + "-" + record.partitionId() +
" with topic ID " + record.topicId();
if (prevPartInfo == null) {
log.info("Created partition {} and {}.", description, newPartInfo);
topicInfo.parts.put(record.partitionId(), newPartInfo);
brokersToIsrs.update(record.topicId(), record.partitionId(), null,
newPartInfo.isr, NO_LEADER, newPartInfo.leader);
updateReassigningTopicsIfNeeded(record.topicId(), record.partitionId(),
false, isReassignmentInProgress(newPartInfo));
} else if (!newPartInfo.equals(prevPartInfo)) {
newPartInfo.maybeLogPartitionChange(log, description, prevPartInfo);
topicInfo.parts.put(record.partitionId(), newPartInfo);
brokersToIsrs.update(record.topicId(), record.partitionId(), prevPartInfo.isr,
newPartInfo.isr, prevPartInfo.leader, newPartInfo.leader);
updateReassigningTopicsIfNeeded(record.topicId(), record.partitionId(),
isReassignmentInProgress(prevPartInfo), isReassignmentInProgress(newPartInfo));
}
if (newPartInfo.hasPreferredLeader()) {
imbalancedPartitions.remove(new TopicIdPartition(record.topicId(), record.partitionId()));
} else {
imbalancedPartitions.add(new TopicIdPartition(record.topicId(), record.partitionId()));
}
}
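/**
 * Keep the reassigningTopics map in sync when a partition starts or stops reassigning.
 */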
private void updateReassigningTopicsIfNeeded(Uuid topicId, int partitionId,
boolean wasReassigning, boolean isReassigning) {
if (!wasReassigning) {
if (isReassigning) {
int[] prevReassigningParts = reassigningTopics.getOrDefault(topicId, Replicas.NONE);
reassigningTopics.put(topicId, Replicas.copyWith(prevReassigningParts, partitionId));
}
} else if (!isReassigning) {
int[] prevReassigningParts = reassigningTopics.getOrDefault(topicId, Replicas.NONE);
int[] newReassigningParts = Replicas.copyWithout(prevReassigningParts, partitionId);
if (newReassigningParts.length == 0) {
reassigningTopics.remove(topicId);
} else {
reassigningTopics.put(topicId, newReassigningParts);
}
}
}
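/**
 * Apply a PartitionChangeRecord by merging it into the existing partition
 * registration and updating the derived ISR, reassignment, and imbalance state.
 */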
public void replay(PartitionChangeRecord record) {
TopicControlInfo topicInfo = topics.get(record.topicId());
if (topicInfo == null) {
throw new RuntimeException("Tried to create partition " + record.topicId() +
":" + record.partitionId() + ", but no topic with that ID was found.");
}
PartitionRegistration prevPartitionInfo = topicInfo.parts.get(record.partitionId());
if (prevPartitionInfo == null) {
throw new RuntimeException("Tried to create partition " + record.topicId() +
":" + record.partitionId() + ", but no partition with that id was found.");
}
PartitionRegistration newPartitionInfo = prevPartitionInfo.merge(record);
updateReassigningTopicsIfNeeded(record.topicId(), record.partitionId(),
isReassignmentInProgress(prevPartitionInfo), isReassignmentInProgress(newPartitionInfo));
topicInfo.parts.put(record.partitionId(), newPartitionInfo);
brokersToIsrs.update(record.topicId(), record.partitionId(),
prevPartitionInfo.isr, newPartitionInfo.isr, prevPartitionInfo.leader,
newPartitionInfo.leader);
String topicPart = topicInfo.name + "-" + record.partitionId() + " with topic ID " +
record.topicId();
newPartitionInfo.maybeLogPartitionChange(log, topicPart, prevPartitionInfo);
if (newPartitionInfo.hasPreferredLeader()) {
imbalancedPartitions.remove(new TopicIdPartition(record.topicId(), record.partitionId()));
} else {
imbalancedPartitions.add(new TopicIdPartition(record.topicId(), record.partitionId()));
}
if (record.removingReplicas() != null || record.addingReplicas() != null) {
log.info("Replayed partition assignment change {} for topic {}", record, topicInfo.name);
} else if (log.isTraceEnabled()) {
log.trace("Replayed partition change {} for topic {}", record, topicInfo.name);
}
}
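/**
 * Apply a RemoveTopicRecord, deleting the topic along with its configurations,
 * its per-partition state, and its entries in brokersToIsrs.
 */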
public void replay(RemoveTopicRecord record) {
// Remove this topic from the topics map and the topicsByName map.
TopicControlInfo topic = topics.remove(record.topicId());
if (topic == null) {
throw new UnknownTopicIdException("Can't find topic with ID " + record.topicId() +
" to remove.");
}
topicsByName.remove(topic.name);
if (Topic.hasCollisionChars(topic.name)) {
String normalizedName = Topic.unifyCollisionChars(topic.name);
TimelineHashSet<String> colliding = topicsWithCollisionChars.get(normalizedName);
if (colliding != null) {
colliding.remove(topic.name);
if (colliding.isEmpty()) {
topicsWithCollisionChars.remove(normalizedName);
}
}
}
reassigningTopics.remove(record.topicId());
// Delete the configurations associated with this topic.
configurationControl.deleteTopicConfigs(topic.name);
for (Map.Entry<Integer, PartitionRegistration> entry : topic.parts.entrySet()) {
int partitionId = entry.getKey();
PartitionRegistration partition = entry.getValue();
// Remove the entries for this topic in brokersToIsrs.
for (int i = 0; i < partition.isr.length; i++) {
brokersToIsrs.removeTopicEntryForBroker(topic.id, partition.isr[i]);
}
imbalancedPartitions.remove(new TopicIdPartition(record.topicId(), partitionId));
}
brokersToIsrs.removeTopicEntryForBroker(topic.id, NO_LEADER);
log.info("Removed topic {} with ID {}.", topic.name, record.topicId());
}
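/**
 * Handle a CreateTopics request: validate the topic names and configurations,
 * then generate the records needed to create each valid topic.
 */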
ControllerResult<CreateTopicsResponseData> createTopics(
ControllerRequestContext context,
CreateTopicsRequestData request,
Set<String> describable
) {
Map<String, ApiError> topicErrors = new HashMap<>();
List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
// Check the topic names.
validateNewTopicNames(topicErrors, request.topics(), topicsWithCollisionChars);
// Identify topics that already exist and mark them with the appropriate error
request.topics().stream().filter(creatableTopic -> topicsByName.containsKey(creatableTopic.name()))
.forEach(t -> topicErrors.put(t.name(), new ApiError(Errors.TOPIC_ALREADY_EXISTS,
"Topic '" + t.name() + "' already exists.")));
// Verify that the configurations for the new topics are OK, and figure out what
// configurations should be created.
Map<ConfigResource, Map<String, Entry<OpType, String>>> configChanges =
computeConfigChanges(topicErrors, request.topics());
// Try to create whatever topics are needed.
Map<String, CreatableTopicResult> successes = new HashMap<>();
for (CreatableTopic topic : request.topics()) {
if (topicErrors.containsKey(topic.name())) continue;
// Figure out what ConfigRecords should be created, if any.
ConfigResource configResource = new ConfigResource(TOPIC, topic.name());
Map<String, Entry<OpType, String>> keyToOps = configChanges.get(configResource);
List<ApiMessageAndVersion> configRecords;
if (keyToOps != null) {
ControllerResult<ApiError> configResult =
configurationControl.incrementalAlterConfig(configResource, keyToOps, true);
if (configResult.response().isFailure()) {
topicErrors.put(topic.name(), configResult.response());
continue;
} else {
configRecords = configResult.records();
}
} else {
configRecords = Collections.emptyList();
}
ApiError error;
try {
error = createTopic(context, topic, records, successes, configRecords, describable.contains(topic.name()));
} catch (ApiException e) {
error = ApiError.fromThrowable(e);
}
if (error.isFailure()) {
topicErrors.put(topic.name(), error);
}
}
// Create responses for all topics.
CreateTopicsResponseData data = new CreateTopicsResponseData();
StringBuilder resultsBuilder = new StringBuilder();
String resultsPrefix = "";
for (CreatableTopic topic : request.topics()) {
ApiError error = topicErrors.get(topic.name());
if (error != null) {
data.topics().add(new CreatableTopicResult().
setName(topic.name()).
setErrorCode(error.error().code()).
setErrorMessage(error.message()));
resultsBuilder.append(resultsPrefix).append(topic).append(": ").
append(error.error()).append(" (").append(error.message()).append(")");
resultsPrefix = ", ";
continue;
}
CreatableTopicResult result = successes.get(topic.name());
data.topics().add(result);
resultsBuilder.append(resultsPrefix).append(topic).append(": ").
append("SUCCESS");
resultsPrefix = ", ";
}
if (request.validateOnly()) {
log.info("Validate-only CreateTopics result(s): {}", resultsBuilder);
return ControllerResult.atomicOf(Collections.emptyList(), data);
} else {
log.info("CreateTopics result(s): {}", resultsBuilder);
return ControllerResult.atomicOf(records, data);
}
}
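/**
 * Attempt to create a single topic, either from a manual partition assignment or
 * by asking the replica placer for one, and append the resulting records.
 */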
private ApiError createTopic(ControllerRequestContext context,
CreatableTopic topic,
List<ApiMessageAndVersion> records,
Map<String, CreatableTopicResult> successes,
List<ApiMessageAndVersion> configRecords,
boolean authorizedToReturnConfigs) {
Map<String, String> creationConfigs = translateCreationConfigs(topic.configs());
Map<Integer, PartitionRegistration> newParts = new HashMap<>();
if (!topic.assignments().isEmpty()) {
if (topic.replicationFactor() != -1) {
return new ApiError(INVALID_REQUEST,
"A manual partition assignment was specified, but replication " +
"factor was not set to -1.");
}
if (topic.numPartitions() != -1) {
return new ApiError(INVALID_REQUEST,
"A manual partition assignment was specified, but numPartitions " +
"was not set to -1.");
}
OptionalInt replicationFactor = OptionalInt.empty();
for (CreatableReplicaAssignment assignment : topic.assignments()) {
if (newParts.containsKey(assignment.partitionIndex())) {
return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
"Found multiple manual partition assignments for partition " +
assignment.partitionIndex());
}
validateManualPartitionAssignment(
new PartitionAssignment(assignment.brokerIds()),
replicationFactor);
replicationFactor = OptionalInt.of(assignment.brokerIds().size());
List<Integer> isr = assignment.brokerIds().stream().
filter(clusterControl::isActive).collect(Collectors.toList());
if (isr.isEmpty()) {
return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
"All brokers specified in the manual partition assignment for " +
"partition " + assignment.partitionIndex() + " are fenced or in controlled shutdown.");
}
newParts.put(assignment.partitionIndex(), new PartitionRegistration(
Replicas.toArray(assignment.brokerIds()), Replicas.toArray(isr),
Replicas.NONE, Replicas.NONE, isr.get(0), LeaderRecoveryState.RECOVERED, 0, 0));
}
for (int i = 0; i < newParts.size(); i++) {
if (!newParts.containsKey(i)) {
return new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
"partitions should be a consecutive 0-based integer sequence");
}
}
ApiError error = maybeCheckCreateTopicPolicy(() -> {
Map<Integer, List<Integer>> assignments = new HashMap<>();
newParts.entrySet().forEach(e -> assignments.put(e.getKey(),
Replicas.toList(e.getValue().replicas)));
return new CreateTopicPolicy.RequestMetadata(
topic.name(), null, null, assignments, creationConfigs);
});
if (error.isFailure()) return error;
} else if (topic.replicationFactor() < -1 || topic.replicationFactor() == 0) {
return new ApiError(Errors.INVALID_REPLICATION_FACTOR,
"Replication factor must be larger than 0, or -1 to use the default value.");
} else if (topic.numPartitions() < -1 || topic.numPartitions() == 0) {
return new ApiError(Errors.INVALID_PARTITIONS,
"Number of partitions was set to an invalid non-positive value.");
} else {
int numPartitions = topic.numPartitions() == -1 ?
defaultNumPartitions : topic.numPartitions();
short replicationFactor = topic.replicationFactor() == -1 ?
defaultReplicationFactor : topic.replicationFactor();
try {
TopicAssignment topicAssignment = clusterControl.replicaPlacer().place(new PlacementSpec(
0,
numPartitions,
replicationFactor
), clusterDescriber);
for (int partitionId = 0; partitionId < topicAssignment.assignments().size(); partitionId++) {
PartitionAssignment partitionAssignment = topicAssignment.assignments().get(partitionId);
List<Integer> replicas = partitionAssignment.replicas();
List<Integer> isr = replicas.stream().
filter(clusterControl::isActive).collect(Collectors.toList());
// If the ISR is empty, it means that all brokers are fenced or
// in controlled shutdown. To be consistent with the replica placer,
// we reject the create topic request with INVALID_REPLICATION_FACTOR.
if (isr.isEmpty()) {
return new ApiError(Errors.INVALID_REPLICATION_FACTOR,
"Unable to replicate the partition " + replicationFactor +
" time(s): All brokers are currently fenced or in controlled shutdown.");
}
newParts.put(partitionId,
new PartitionRegistration(
Replicas.toArray(replicas),
Replicas.toArray(isr),
Replicas.NONE,
Replicas.NONE,
isr.get(0),
LeaderRecoveryState.RECOVERED,
0,
0));
}
} catch (InvalidReplicationFactorException e) {
return new ApiError(Errors.INVALID_REPLICATION_FACTOR,
"Unable to replicate the partition " + replicationFactor +
" time(s): " + e.getMessage());
}
ApiError error = maybeCheckCreateTopicPolicy(() -> new CreateTopicPolicy.RequestMetadata(
topic.name(), numPartitions, replicationFactor, null, creationConfigs));
if (error.isFailure()) return error;
}
int numPartitions = newParts.size();
try {
context.applyPartitionChangeQuota(numPartitions); // check controller mutation quota
} catch (ThrottlingQuotaExceededException e) {
log.debug("Topic creation of {} partitions not allowed because quota is violated. Delay time: {}",
numPartitions, e.throttleTimeMs());
return ApiError.fromThrowable(e);
}
Uuid topicId = Uuid.randomUuid();
CreatableTopicResult result = new CreatableTopicResult().
setName(topic.name()).
setTopicId(topicId).
setErrorCode(NONE.code()).
setErrorMessage(null);
if (authorizedToReturnConfigs) {
Map<String, ConfigEntry> effectiveConfig = configurationControl.
computeEffectiveTopicConfigs(creationConfigs);
List<String> configNames = new ArrayList<>(effectiveConfig.keySet());
configNames.sort(String::compareTo);
for (String configName : configNames) {
ConfigEntry entry = effectiveConfig.get(configName);
result.configs().add(new CreateTopicsResponseData.CreatableTopicConfigs().
setName(entry.name()).
setValue(entry.isSensitive() ? null : entry.value()).
setReadOnly(entry.isReadOnly()).
setConfigSource(KafkaConfigSchema.translateConfigSource(entry.source()).id()).
setIsSensitive(entry.isSensitive()));
}
result.setNumPartitions(numPartitions);
result.setReplicationFactor((short) newParts.values().iterator().next().replicas.length);
result.setTopicConfigErrorCode(NONE.code());
} else {
result.setTopicConfigErrorCode(TOPIC_AUTHORIZATION_FAILED.code());
}
successes.put(topic.name(), result);
records.add(new ApiMessageAndVersion(new TopicRecord().
setName(topic.name()).
setTopicId(topicId), (short) 0));
// ConfigRecords go after TopicRecord but before PartitionRecord(s).
records.addAll(configRecords);
for (Entry<Integer, PartitionRegistration> partEntry : newParts.entrySet()) {
int partitionIndex = partEntry.getKey();
PartitionRegistration info = partEntry.getValue();
records.add(info.toRecord(topicId, partitionIndex));
}
return ApiError.NONE;
}
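/**
 * Run the CreateTopicPolicy against the given request metadata, if a policy is configured.
 */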
private ApiError maybeCheckCreateTopicPolicy(Supplier<CreateTopicPolicy.RequestMetadata> supplier) {
if (createTopicPolicy.isPresent()) {
try {
createTopicPolicy.get().validate(supplier.get());
} catch (PolicyViolationException e) {
return new ApiError(Errors.POLICY_VIOLATION, e.getMessage());
}
}
return ApiError.NONE;
}
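/**
 * Validate the names of the topics in a CreateTopics request, including checking
 * for collisions with existing topics once periods are replaced with underscores.
 */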
static void validateNewTopicNames(Map<String, ApiError> topicErrors,
CreatableTopicCollection topics,
Map<String, ? extends Set<String>> topicsWithCollisionChars) {
for (CreatableTopic topic : topics) {
if (topicErrors.containsKey(topic.name())) continue;
try {
Topic.validate(topic.name());
} catch (InvalidTopicException e) {
topicErrors.put(topic.name(),
new ApiError(Errors.INVALID_TOPIC_EXCEPTION, e.getMessage()));
}
if (Topic.hasCollisionChars(topic.name())) {
String normalizedName = Topic.unifyCollisionChars(topic.name());
Set<String> colliding = topicsWithCollisionChars.get(normalizedName);
if (colliding != null) {
topicErrors.put(topic.name(), new ApiError(Errors.INVALID_TOPIC_EXCEPTION,
"Topic '" + topic.name() + "' collides with existing topic: " +
colliding.iterator().next()));
}
}
}
}
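/**
 * Compute the configuration changes implied by a CreateTopics request, rejecting
 * topics that supply null configuration values.
 */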
static Map<ConfigResource, Map<String, Entry<OpType, String>>>
computeConfigChanges(Map<String, ApiError> topicErrors,
CreatableTopicCollection topics) {
Map>> configChanges = new HashMap<>();
for (CreatableTopic topic : topics) {
if (topicErrors.containsKey(topic.name())) continue;
Map<String, Entry<OpType, String>> topicConfigs = new HashMap<>();
List<String> nullConfigs = new ArrayList<>();
for (CreateTopicsRequestData.CreateableTopicConfig config : topic.configs()) {
if (config.value() == null) {
nullConfigs.add(config.name());
} else {
topicConfigs.put(config.name(), new SimpleImmutableEntry<>(SET, config.value()));
}
}
if (!nullConfigs.isEmpty()) {
topicErrors.put(topic.name(), new ApiError(Errors.INVALID_CONFIG,
"Null value not supported for topic configs: " + String.join(",", nullConfigs)));
} else if (!topicConfigs.isEmpty()) {
configChanges.put(new ConfigResource(TOPIC, topic.name()), topicConfigs);
}
}
return configChanges;
}
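/**
 * Find the IDs of the given topic names, as of the given offset.
 */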
Map<String, ResultOrError<Uuid>> findTopicIds(long offset, Collection<String> names) {
Map<String, ResultOrError<Uuid>> results = new HashMap<>(names.size());
for (String name : names) {
if (name == null) {
results.put(null, new ResultOrError<>(INVALID_REQUEST, "Invalid null topic name."));
} else {
Uuid id = topicsByName.get(name, offset);
if (id == null) {
results.put(name, new ResultOrError<>(
new ApiError(UNKNOWN_TOPIC_OR_PARTITION)));
} else {
results.put(name, new ResultOrError<>(id));
}
}
}
return results;
}
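/**
 * Return a map of all topic names to topic IDs, as of the given offset.
 */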
Map<String, Uuid> findAllTopicIds(long offset) {
HashMap<String, Uuid> result = new HashMap<>(topicsByName.size(offset));
for (Entry<String, Uuid> entry : topicsByName.entrySet(offset)) {
result.put(entry.getKey(), entry.getValue());
}
return result;
}
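/**
 * Find the names of the given topic IDs, as of the given offset.
 */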
Map<Uuid, ResultOrError<String>> findTopicNames(long offset, Collection<Uuid> ids) {
Map<Uuid, ResultOrError<String>> results = new HashMap<>(ids.size());
for (Uuid id : ids) {
if (id == null || id.equals(Uuid.ZERO_UUID)) {
results.put(id, new ResultOrError<>(new ApiError(INVALID_REQUEST,
"Attempt to find topic with invalid topicId " + id)));
} else {
TopicControlInfo topic = topics.get(id, offset);
if (topic == null) {
results.put(id, new ResultOrError<>(new ApiError(UNKNOWN_TOPIC_ID)));
} else {
results.put(id, new ResultOrError<>(topic.name));
}
}
}
return results;
}
ControllerResult