/**
* Copyright (c) 2019 - 2024 StreamNative, Inc. All Rights Reserved.
*/
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.streamnative.pulsar.handlers.kop;
import static io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationKey.TopicKey;
import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_COMPACT;
import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_CONFIG;
import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_DELETE;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import io.streamnative.pulsar.handlers.kop.storage.PartitionLog;
import io.streamnative.pulsar.handlers.kop.utils.TopicNameUtils;
import io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperation;
import io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationPurgatory;
import io.streamnative.pulsar.handlers.kop.utils.timer.SystemTimer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.mledger.Position;
import org.apache.bookkeeper.mledger.impl.PositionImpl;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.InvalidPartitionsException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.TopicExistsException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.message.CreatePartitionsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.ApiError;
import org.apache.kafka.common.requests.CreateTopicsRequest;
import org.apache.kafka.common.requests.DescribeConfigsResponse;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.common.util.FutureUtil;
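/**
 * Handles Kafka admin-style operations (topic creation, partition expansion, config alteration and
 * description, topic deletion and truncation) by delegating to the Pulsar admin client. It also
 * caches the per-listener broker list and controller id that are used to answer metadata requests.
 */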
@Slf4j
public class AdminManager {
public static final String KOP_KAFKA_PROPERTY_PREFIX = "kop.kafka.";
private final DelayedOperationPurgatory<DelayedOperation> topicPurgatory =
DelayedOperationPurgatory.<DelayedOperation>builder()
.purgatoryName("topic")
.timeoutTimer(SystemTimer.builder().executorName("topic").build())
.build();
private final PulsarAdmin admin;
private final int defaultNumPartitions;
private final int maxMessageSize;
private long defaultCompactionThreshold = 100 * 1024 * 1024;
private volatile Map<String, Set<Node>> brokersCache = Maps.newHashMap();
private final ReentrantReadWriteLock brokersCacheLock = new ReentrantReadWriteLock();
private final Random random = new Random();
private volatile Map<String, Integer> controllerId = Maps.newHashMap();
public AdminManager(PulsarAdmin admin, KafkaServiceConfiguration conf) {
this.admin = admin;
this.defaultNumPartitions = conf.getDefaultNumPartitions();
this.maxMessageSize = conf.getMaxMessageSize();
long brokerServiceCompactionThresholdInBytes = conf.getBrokerServiceCompactionThresholdInBytes();
if (brokerServiceCompactionThresholdInBytes > 0) {
this.defaultCompactionThreshold = brokerServiceCompactionThresholdInBytes;
}
}
public void shutdown() {
topicPurgatory.shutdown();
}
// Note: the migration feature is deprecated so we just use a trivial implementation here
public CompletableFuture<Map<String, ApiError>> createTopicsAsyncDeprecated(
Map<String, CreateTopicsRequestData.CreatableTopic> createInfo,
int timeoutMs,
String namespacePrefix) {
final var info = new HashMap<KsnTopicPartition, CreateTopicsRequestData.CreatableTopic>();
createInfo.forEach((pulsarTopic, creatableTopic) -> {
final var ksnTopicPartition = new KsnTopicPartition(TopicNameUtils.pulsarToKafka(pulsarTopic,
namespacePrefix.replaceAll("/", ".")), pulsarTopic);
info.put(ksnTopicPartition, creatableTopic);
});
return createTopicsAsync(info, timeoutMs, namespacePrefix);
}
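/**
 * Create a partitioned Pulsar topic for every requested Kafka topic.
 *
 * <p>The Kafka "cleanup.policy" config is persisted as a topic property (prefixed with
 * {@link #KOP_KAFKA_PROPERTY_PREFIX}); when the policy contains "compact" and no namespace-level
 * compaction threshold is configured, the default compaction threshold is applied to the topic.
 * Futures that are still pending when the timeout fires (or immediately, if {@code timeoutMs <= 0})
 * are completed with {@link Errors#REQUEST_TIMED_OUT}.
 *
 * @return a future that completes with a map from Kafka topic name to the per-topic {@link ApiError}
 */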
public CompletableFuture<Map<String, ApiError>> createTopicsAsync(
Map<KsnTopicPartition, CreateTopicsRequestData.CreatableTopic> createInfo,
int timeoutMs,
String namespacePrefix) {
final var futureMap = new ConcurrentHashMap<KsnTopicPartition, CompletableFuture<ApiError>>();
final AtomicInteger numTopics = new AtomicInteger(createInfo.size());
final CompletableFuture<Map<String, ApiError>> resultFuture = new CompletableFuture<>();
Runnable complete = () -> {
// prevent `futureMap` from being modified by createPartitionedTopicAsync()'s callback
numTopics.set(0);
// complete the pending futures with timeout error
futureMap.values().forEach(future -> {
if (!future.isDone()) {
future.complete(new ApiError(Errors.REQUEST_TIMED_OUT, null));
}
});
resultFuture.complete(futureMap.entrySet().stream().collect(Collectors.toMap(
entry -> entry.getKey().topicPartition().topic(),
entry -> entry.getValue().getNow(ApiError.NONE)
)));
};
createInfo.forEach((topic, detail) -> {
final CompletableFuture<ApiError> errorFuture = new CompletableFuture<>();
futureMap.put(topic, errorFuture);
int numPartitions = detail.numPartitions();
if (numPartitions == CreateTopicsRequest.NO_NUM_PARTITIONS) {
numPartitions = defaultNumPartitions;
}
if (numPartitions < 0) {
errorFuture.complete(ApiError.fromThrowable(
new InvalidRequestException("The partition '" + numPartitions + "' is negative")));
if (numTopics.decrementAndGet() == 0) {
complete.run();
}
return;
}
// https://docs.confluent.io/platform/current/installation/configuration/topic-configs.html#cleanup-policy
String cleanupPolicy = CLEANUP_POLICY_DELETE;
CreateTopicsRequestData.CreateableTopicConfigCollection configs = detail.configs();
CreateTopicsRequestData.CreateableTopicConfig cleanupPolicyConfig = configs.find(CLEANUP_POLICY_CONFIG);
if (cleanupPolicyConfig != null && cleanupPolicyConfig.value() != null) {
cleanupPolicy = cleanupPolicyConfig.value();
}
final boolean enableTopicCompact = cleanupPolicy.contains(CLEANUP_POLICY_COMPACT);
final Map<String, String> properties =
Map.of(PartitionLog.KAFKA_TOPIC_UUID_PROPERTY_NAME, UUID.randomUUID().toString(),
KOP_KAFKA_PROPERTY_PREFIX + CLEANUP_POLICY_CONFIG, cleanupPolicy);
admin.topics()
.createPartitionedTopicAsync(topic.pulsarTopic(), numPartitions, properties)
.thenCompose(x -> {
// Set default compaction threshold to enable auto topic compaction
if (enableTopicCompact) {
return admin.namespaces().getCompactionThresholdAsync(namespacePrefix)
.thenCompose(compactionThreshold -> {
if (compactionThreshold != null) {
log.info("[{}] The compactionThreshold of namespace level has been set: {}",
topic.pulsarTopic(), compactionThreshold);
return CompletableFuture.completedFuture(null);
} else {
return admin.topicPolicies().setCompactionThresholdAsync(
topic.pulsarTopic(), this.defaultCompactionThreshold);
}
});
} else {
return CompletableFuture.completedFuture(null);
}
})
.whenComplete((ignored, e) -> {
if (e == null) {
if (log.isDebugEnabled()) {
log.debug("Successfully create topic '{}'", topic);
}
} else {
e = FutureUtil.unwrapCompletionException(e);
log.error("Failed to create topic '{}': {}", topic, e);
}
if (e == null) {
errorFuture.complete(ApiError.NONE);
} else if (e instanceof PulsarAdminException.ConflictException) {
errorFuture.complete(ApiError.fromThrowable(
new TopicExistsException("Topic '" + topic + "' already exists.")));
} else {
errorFuture.complete(ApiError.fromThrowable(e));
}
if (numTopics.decrementAndGet() == 0) {
complete.run();
}
});
});
if (timeoutMs <= 0) {
complete.run();
} else {
List<Object> delayedCreateKeys = createInfo.keySet().stream()
.map(KsnTopicPartition::pulsarTopic)
.map(TopicKey::new).collect(Collectors.toList());
DelayedCreateTopics delayedCreate = new DelayedCreateTopics(timeoutMs, numTopics, complete);
topicPurgatory.tryCompleteElseWatch(delayedCreate, delayedCreateKeys);
}
return resultFuture;
}
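/**
 * Apply an incremental alter-configs resource. Only the topic-level "cleanup.policy" entry is
 * honoured: it is stored as a topic property and mapped onto the Pulsar compaction threshold
 * (enabled with the default threshold for "compact", set to 0 to disable compaction otherwise,
 * and removed together with the property on a DELETE operation). All other entries are logged
 * and ignored.
 */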
CompletableFuture<Void> alterConfigs(IncrementalAlterConfigsRequestData.AlterConfigsResource resource,
String namespacePrefix) {
List<CompletableFuture<Void>> futures = new ArrayList<>();
byte resourceType = resource.resourceType();
String resourceName = resource.resourceName();
resource.configs().forEach((IncrementalAlterConfigsRequestData.AlterableConfig entry) -> {
final var pulsarTopic = TopicNameUtils.kafkaToPulsar(resourceName, namespacePrefix);
if (resourceType == ConfigResource.Type.TOPIC.id() && entry.name().equals(CLEANUP_POLICY_CONFIG)) {
if (AlterConfigOp.OpType.forId(entry.configOperation()) == AlterConfigOp.OpType.DELETE) {
CompletableFuture<Void> future =
admin.topicPolicies().removeCompactionThresholdAsync(pulsarTopic)
.thenCompose(__ -> admin.topics().removePropertiesAsync(pulsarTopic,
KOP_KAFKA_PROPERTY_PREFIX + CLEANUP_POLICY_CONFIG));
futures.add(future);
return;
}
final Map<String, String> properties =
Map.of(KOP_KAFKA_PROPERTY_PREFIX + CLEANUP_POLICY_CONFIG, entry.value());
CompletableFuture<Void> future =
admin.topicPolicies().getCompactionThresholdAsync(pulsarTopic, true)
.thenCompose(threshold -> {
if (threshold != null && threshold > 0) {
if (entry.value().contains(CLEANUP_POLICY_COMPACT)) {
log.info("[{}] The compactionThreshold has been set: {}",
pulsarTopic, threshold);
return CompletableFuture.completedFuture(null);
} else {
log.info("[{}] Disable topic compaction", pulsarTopic);
return admin.topicPolicies()
.setCompactionThresholdAsync(pulsarTopic, 0L);
}
} else {
if (entry.value().contains(CLEANUP_POLICY_COMPACT)) {
return admin.topicPolicies()
.setCompactionThresholdAsync(pulsarTopic,
this.defaultCompactionThreshold);
} else {
return CompletableFuture.completedFuture(null);
}
}
}).thenCompose(
__ -> admin.topics().updatePropertiesAsync(pulsarTopic, properties));
futures.add(future);
} else {
log.info("Ignoring ALTER_CONFIG for {} (type {}) {} = {}", resourceName, resourceType,
entry.name(), entry.value());
}
});
return FutureUtil.waitForAll(futures);
}
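/**
 * Describe configs for the given resources. Topic resources return the static default entries from
 * {@link KafkaLogConfig}, plus the stored "cleanup.policy" when it differs from the default "delete";
 * missing or non-partitioned topics yield an error. Broker resources return a few dummy entries that
 * tools such as KSQL expect. Any other resource type is rejected as an invalid request.
 */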
CompletableFuture<Map<ConfigResource, DescribeConfigsResponse.Config>> describeConfigsAsync(
Map<ConfigResource, Optional<Set<String>>> resourceToConfigNames, String namespacePrefix) {
// Since Kafka's storage and policies are quite different from Pulsar's, we just return a default config
// so that Kafka-based systems that need to send a DescribeConfigs request (such as the Confluent Schema
// Registry) still work.
final DescribeConfigsResponse.Config defaultTopicConfig = new DescribeConfigsResponse.Config(ApiError.NONE,
KafkaLogConfig.getEntries().entrySet().stream().map(entry ->
new DescribeConfigsResponse.ConfigEntry(entry.getKey(), entry.getValue(),
DescribeConfigsResponse.ConfigSource.TOPIC_CONFIG, false, false,
Collections.emptyList())
).collect(Collectors.toList()));
Map<ConfigResource, CompletableFuture<DescribeConfigsResponse.Config>> futureMap =
resourceToConfigNames.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> {
ConfigResource resource = entry.getKey();
try {
CompletableFuture<DescribeConfigsResponse.Config> future = new CompletableFuture<>();
switch (resource.type()) {
case TOPIC:
final var kafkaTopic = resource.name();
final var pulsarTopic = TopicNameUtils.kafkaToPulsar(kafkaTopic, namespacePrefix);
admin.topics().getPartitionedTopicMetadataAsync(pulsarTopic)
.whenComplete((metadata, e) -> {
if (e != null) {
if (e instanceof PulsarAdminException.NotFoundException) {
final ApiError error = new ApiError(
Errors.UNKNOWN_TOPIC_OR_PARTITION,
"Topic " + kafkaTopic + " doesn't exist");
future.complete(new DescribeConfigsResponse.Config(
error, Collections.emptyList()));
} else {
future.complete(new DescribeConfigsResponse.Config(
ApiError.fromThrowable(e), Collections.emptyList()));
}
} else if (metadata.partitions > 0) {
Map<String, String> properties = metadata.properties;
String cleanupPolicy = properties != null
? properties.getOrDefault(
KOP_KAFKA_PROPERTY_PREFIX + CLEANUP_POLICY_CONFIG,
CLEANUP_POLICY_DELETE) : CLEANUP_POLICY_DELETE;
if (CLEANUP_POLICY_DELETE.equals(cleanupPolicy)) {
future.complete(defaultTopicConfig);
return;
}
DescribeConfigsResponse.Config config =
new DescribeConfigsResponse.Config(
ApiError.NONE,
Lists.newArrayList(defaultTopicConfig.entries()));
config.entries().add(
new DescribeConfigsResponse.ConfigEntry(
CLEANUP_POLICY_CONFIG, cleanupPolicy,
DescribeConfigsResponse.ConfigSource.TOPIC_CONFIG,
false, false, Collections.emptyList()));
future.complete(config);
} else {
final ApiError error = new ApiError(Errors.INVALID_TOPIC_EXCEPTION,
"Topic " + kafkaTopic
+ " is non-partitioned");
future.complete(new DescribeConfigsResponse.Config(
error, Collections.emptyList()));
}
});
break;
case BROKER:
List<DescribeConfigsResponse.ConfigEntry> dummyConfig = new ArrayList<>();
dummyConfig.add(buildDummyEntryConfig("num.partitions",
this.defaultNumPartitions + ""));
dummyConfig.add(buildDummyEntryConfig("message.max.bytes", maxMessageSize + ""));
// this is useless in KOP, but some tools like KSQL need a value
dummyConfig.add(buildDummyEntryConfig("default.replication.factor", "1"));
dummyConfig.add(buildDummyEntryConfig("delete.topic.enable", "true"));
future.complete(new DescribeConfigsResponse.Config(ApiError.NONE, dummyConfig));
break;
default:
return CompletableFuture.completedFuture(new DescribeConfigsResponse.Config(
ApiError.fromThrowable(
new InvalidRequestException("Unsupported resource type: "
+ resource.type())),
Collections.emptyList()));
}
return future;
} catch (Exception e) {
return CompletableFuture.completedFuture(
new DescribeConfigsResponse.Config(ApiError.fromThrowable(e), Collections.emptyList()));
}
}));
CompletableFuture<Map<ConfigResource, DescribeConfigsResponse.Config>> resultFuture = new CompletableFuture<>();
CompletableFuture.allOf(futureMap.values().toArray(new CompletableFuture[0])).whenComplete((ignored, e) -> {
if (e != null) {
resultFuture.completeExceptionally(e);
return;
}
resultFuture.complete(futureMap.entrySet().stream().collect(
Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getNow(null))
));
});
return resultFuture;
}
private DescribeConfigsResponse.ConfigEntry buildDummyEntryConfig(String configName, String configValue) {
return new DescribeConfigsResponse.ConfigEntry(
configName, configValue,
DescribeConfigsResponse.ConfigSource.DEFAULT_CONFIG, true, true,
Collections.emptyList());
}
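/**
 * Delete a partitioned topic (forcefully, including its schema) and report the topic name to either
 * the success or the error consumer.
 */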
public void deleteTopic(String topicToDelete,
Consumer<String> successConsumer,
Consumer<String> errorConsumer) {
admin.topics()
.deletePartitionedTopicAsync(topicToDelete, true, true)
.thenRun(() -> {
log.info("delete topic {} successfully.", topicToDelete);
successConsumer.accept(topicToDelete);
})
.exceptionally((e -> {
log.error("delete topic {} failed, exception: ", topicToDelete, e);
errorConsumer.accept(topicToDelete);
return null;
}));
}
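/**
 * Truncate a topic. Only truncation at the latest position (i.e. dropping all existing messages) is
 * supported; any other position is reported to the error consumer.
 */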
public void truncateTopic(String topicToDelete,
long offset,
Position position,
Consumer<String> successConsumer,
Consumer<String> errorConsumer) {
log.info("truncateTopic {} at offset {}, pulsar position {}", topicToDelete, offset, position);
if (position == null) {
errorConsumer.accept("Cannot find position");
return;
}
if (position.equals(PositionImpl.LATEST)) {
admin.topics()
.truncateAsync(topicToDelete)
.thenRun(() -> {
log.info("truncated topic {} successfully.", topicToDelete);
successConsumer.accept(topicToDelete);
})
.exceptionally((e -> {
log.error("truncated topic {} failed, exception: ", topicToDelete, e);
errorConsumer.accept(topicToDelete);
return null;
}));
} else {
errorConsumer.accept("Not implemented truncate topic at position " + position);
}
}
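/**
 * Increase the partition count of existing topics. Manual replica assignments are rejected, and
 * shrinking the partition count is refused in {@link #handleUpdatePartitionsAsync}. As with topic
 * creation, futures still pending when the timeout fires are completed with
 * {@link Errors#REQUEST_TIMED_OUT}.
 *
 * @return a future that completes with a map from Kafka topic name to the per-topic {@link ApiError}
 */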
CompletableFuture<Map<String, ApiError>> createPartitionsAsync(
Map<KsnTopicPartition, CreatePartitionsRequestData.CreatePartitionsTopic> createInfo,
int timeoutMs) {
final Map<String, CompletableFuture<ApiError>> futureMap = new ConcurrentHashMap<>();
final AtomicInteger numTopics = new AtomicInteger(createInfo.size());
final CompletableFuture<Map<String, ApiError>> resultFuture = new CompletableFuture<>();
Runnable complete = () -> {
// prevent `futureMap` from being modified by updatePartitionedTopicAsync()'s callback
numTopics.set(0);
// complete the pending futures with timeout error
futureMap.values().forEach(future -> {
if (!future.isDone()) {
future.complete(new ApiError(Errors.REQUEST_TIMED_OUT, null));
}
});
resultFuture.complete(futureMap.entrySet().stream().collect(Collectors.toMap(
Map.Entry::getKey,
entry -> entry.getValue().getNow(ApiError.NONE)
)));
};
createInfo.forEach((ksnTopicPartition, newPartitions) -> {
final CompletableFuture<ApiError> errorFuture = new CompletableFuture<>();
final var kafkaTopic = ksnTopicPartition.topicPartition().topic();
final var pulsarTopic = ksnTopicPartition.pulsarTopic();
futureMap.put(kafkaTopic, errorFuture);
int numPartitions = newPartitions.count();
if (numPartitions < 0) {
errorFuture.complete(ApiError.fromThrowable(
new InvalidPartitionsException("The partition '" + numPartitions + "' is negative")));
} else if (newPartitions.assignments() != null && !newPartitions.assignments().isEmpty()) {
errorFuture.complete(ApiError.fromThrowable(
new InvalidRequestException(
"Kop server currently doesn't support manual assignment replica sets '"
+ newPartitions
.assignments()
.stream()
.map(CreatePartitionsRequestData.CreatePartitionsAssignment::brokerIds)
.map(String::valueOf).collect(Collectors.joining(", ", "[", "]"))
+ "' the number of partitions must be specified ")));
}
if (errorFuture.isDone()) {
if (numTopics.decrementAndGet() == 0) {
complete.run();
}
return;
}
handleUpdatePartitionsAsync(kafkaTopic, pulsarTopic, numPartitions, errorFuture, numTopics, complete);
});
if (timeoutMs <= 0) {
complete.run();
} else {
List<Object> delayedCreateKeys = createInfo.keySet().stream().map(__ -> __.topicPartition().topic())
.map(TopicKey::new).collect(Collectors.toList());
DelayedCreatePartitions delayedCreate = new DelayedCreatePartitions(timeoutMs, numTopics, complete);
topicPurgatory.tryCompleteElseWatch(delayedCreate, delayedCreateKeys);
}
return resultFuture;
}
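/**
 * Look up the current partition metadata and, if the requested count is not lower than the current
 * one, update the partitioned topic; {@code errorFuture} is completed with the resulting
 * {@link ApiError} and the shared completion callback is triggered once all topics are handled.
 */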
private void handleUpdatePartitionsAsync(String kafkaTopic,
String pulsarTopic,
int newPartitions,
CompletableFuture<ApiError> errorFuture,
AtomicInteger numTopics,
Runnable complete) {
admin.topics().getPartitionedTopicMetadataAsync(pulsarTopic)
.whenComplete((metadata, t) -> {
if (t == null) {
int oldPartitions = metadata.partitions;
if (oldPartitions > newPartitions) {
errorFuture.complete(ApiError.fromThrowable(
new InvalidPartitionsException(
"Topic currently has '" + oldPartitions + "' partitions, "
+ "which is higher than the requested '" + newPartitions + "'.")
));
if (numTopics.decrementAndGet() == 0) {
complete.run();
}
return;
}
admin.topics().updatePartitionedTopicAsync(pulsarTopic, newPartitions)
.whenComplete((ignored, e) -> {
if (e == null) {
if (log.isDebugEnabled()) {
log.debug("Successfully create topic '{}' new partitions '{}'",
pulsarTopic, newPartitions);
}
errorFuture.complete(ApiError.NONE);
} else {
log.error("Failed to create topic '{}' new partitions '{}': {}",
pulsarTopic, newPartitions, e);
errorFuture.complete(ApiError.fromThrowable(e));
}
if (numTopics.decrementAndGet() == 0) {
complete.run();
}
});
} else {
if (t instanceof PulsarAdminException.NotFoundException) {
errorFuture.complete(ApiError.fromThrowable(
new UnknownTopicOrPartitionException("Topic '" + kafkaTopic + "' doesn't exist.")));
} else {
log.error("Failed to getPartitionMetadata for {}", pulsarTopic, t);
errorFuture.complete(ApiError.fromThrowable(t));
}
if (numTopics.decrementAndGet() == 0) {
complete.run();
}
}
});
}
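/**
 * Return the cached broker nodes for the given listener, or an empty list when the listener is
 * unknown.
 */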
public Collection<? extends Node> getBrokers(String listenerName) {
if (brokersCache.containsKey(listenerName)) {
return brokersCache.get(listenerName);
}
return Collections.emptyList();
}
public Map<String, Set<Node>> getAllBrokers() {
return brokersCache;
}
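/**
 * Replace the broker cache and recompute the per-listener controller id; both updates happen while
 * the write lock is held.
 */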
public void setBrokers(Map<String, Set<Node>> newBrokers) {
brokersCacheLock.writeLock().lock();
try {
setControllerId(newBrokers);
this.brokersCache = newBrokers;
} finally {
brokersCacheLock.writeLock().unlock();
}
}
// only updated from setBrokers(), while the write lock is held
private void setControllerId(Map<String, Set<Node>> newBrokers) {
Map<String, Integer> newControllerId = Maps.newHashMap();
newBrokers.forEach((listenerName, brokers) -> {
if (brokers.size() == 0) {
newControllerId.put(listenerName, MetadataResponse.NO_CONTROLLER_ID);
} else {
List<Node> nodes = Lists.newArrayList(brokers);
newControllerId.put(listenerName,
nodes.size() > 1 ? nodes.get(random.nextInt(brokers.size())).id() : nodes.get(0).id());
}
});
this.controllerId = newControllerId;
}
// always get the controllerId directly from the cache
public int getControllerId(String listenerName) {
return controllerId.getOrDefault(listenerName, MetadataResponse.NO_CONTROLLER_ID);
}
}