/**
 * Copyright (c) 2019 - 2024 StreamNative, Inc. All Rights Reserved.
 */
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.streamnative.pulsar.handlers.kop;

import static com.google.common.base.Preconditions.checkState;
import static io.streamnative.pulsar.handlers.kop.KopServerStats.SERVER_SCOPE;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.ssl.SslHandler;
import io.streamnative.pulsar.handlers.kop.coordinator.group.GroupConfig;
import io.streamnative.pulsar.handlers.kop.coordinator.group.GroupCoordinator;
import io.streamnative.pulsar.handlers.kop.coordinator.group.OffsetConfig;
import io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionConfig;
import io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator;
import io.streamnative.pulsar.handlers.kop.http.HttpChannelInitializer;
import io.streamnative.pulsar.handlers.kop.lookup.LookupService;
import io.streamnative.pulsar.handlers.kop.lookup.PulsarClientLookupService;
import io.streamnative.pulsar.handlers.kop.migration.MigrationManager;
import io.streamnative.pulsar.handlers.kop.schemaregistry.SchemaRegistryChannelInitializer;
import io.streamnative.pulsar.handlers.kop.security.kerberos.KerberosLogin;
import io.streamnative.pulsar.handlers.kop.stats.PrometheusMetricsProvider;
import io.streamnative.pulsar.handlers.kop.stats.StatsLogger;
import io.streamnative.pulsar.handlers.kop.storage.ProducerStateManagerSnapshotBufferFactory;
import io.streamnative.pulsar.handlers.kop.storage.ReplicaManager;
import io.streamnative.pulsar.handlers.kop.topic.KopTopicFactory;
import io.streamnative.pulsar.handlers.kop.utils.ConfigurationUtils;
import io.streamnative.pulsar.handlers.kop.utils.KopTopic;
import io.streamnative.pulsar.handlers.kop.utils.MetadataUtils;
import io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperation;
import io.streamnative.pulsar.handlers.kop.utils.delayed.DelayedOperationPurgatory;
import io.streamnative.pulsar.handlers.kop.utils.ssl.SSLUtils;
import io.streamnative.pulsar.handlers.kop.utils.timer.SystemTimer;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.util.OrderedExecutor;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.kafka.common.internals.Topic;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.utils.Time;
import org.apache.pulsar.broker.PulsarServerException;
import org.apache.pulsar.broker.ServiceConfiguration;
import org.apache.pulsar.broker.protocol.ProtocolHandler;
import org.apache.pulsar.broker.service.BrokerService;
import org.apache.pulsar.broker.service.TopicFactory;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.common.naming.NamespaceName;
import org.apache.pulsar.common.naming.TopicName;
import org.apache.pulsar.common.policies.data.ClusterData;
import org.apache.pulsar.common.util.FutureUtil;
import org.eclipse.jetty.util.ssl.SslContextFactory;

/**
 * Kafka protocol handler, loaded and run by the Pulsar service.
 */
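/*
 * Enabling the handler follows Pulsar's standard protocol-handler mechanism; a minimal
 * broker.conf sketch (values are deployment-specific and shown here only as an example):
 *
 *   messagingProtocols=kafka
 *   protocolHandlerDirectory=./protocols
 *   kafkaListeners=PLAINTEXT://0.0.0.0:9092
 */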
@Slf4j
public class KafkaProtocolHandler implements ProtocolHandler, TenantContextManager {

    public static final String PROTOCOL_NAME = "kafka";
    public static final String TLS_HANDLER = "tls";
    @Getter
    private RequestStats requestStats;
    private PrometheusMetricsProvider statsProvider;
    private LookupService lookupService;
    @VisibleForTesting
    @Getter
    private AdminManager adminManager = null;
    private SystemTopicClient txnTopicClient;
    private DelayedOperationPurgatory<DelayedOperation> producePurgatory;
    private DelayedOperationPurgatory<DelayedOperation> fetchPurgatory;

    private KafkaTopicLookupService kafkaTopicLookupService;
    @VisibleForTesting
    @Getter
    private Map<InetSocketAddress, ChannelInitializer<SocketChannel>> channelInitializerMap;

    @Getter
    @VisibleForTesting
    protected SystemTopicClient offsetTopicClient;

    @Getter
    private KafkaServiceConfiguration kafkaConfig;
    private BrokerService brokerService;
    private KafkaTopicManagerSharedState kafkaTopicManagerSharedState;

    @Getter
    private KopEventManager kopEventManager;
    private OrderedScheduler sendResponseScheduler;
    @VisibleForTesting
    @Getter
    private NamespaceBundleOwnershipListenerImpl bundleListener;
    @VisibleForTesting
    @Getter
    private SchemaRegistryManager schemaRegistryManager;
    private MigrationManager migrationManager;
    private ReplicaManager replicaManager;

    private ScheduledFuture<?> txUpdatedPurgeAbortedTxOffsetsTimeHandle;

    private final Map<String, GroupCoordinator> groupCoordinatorsByTenant = new ConcurrentHashMap<>();
    @VisibleForTesting
    @Getter
    private final Map<String, TransactionCoordinator> transactionCoordinatorByTenant = new ConcurrentHashMap<>();

    @VisibleForTesting
    @Getter
    private OrderedExecutor recoveryExecutor;

    private ClusterData clusterData;

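    /**
     * Returns the {@link GroupCoordinator} for the given tenant, lazily creating and
     * booting one on first access.
     */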
    @Override
    public GroupCoordinator getGroupCoordinator(String tenant) {
        return groupCoordinatorsByTenant.computeIfAbsent(tenant, this::createAndBootGroupCoordinator);
    }

    @VisibleForTesting
    public Map<String, GroupCoordinator> getGroupCoordinators() {
        return groupCoordinatorsByTenant;
    }

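    /**
     * Returns the {@link TransactionCoordinator} for the given tenant, lazily creating
     * and booting one on first access.
     */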
    @Override
    public TransactionCoordinator getTransactionCoordinator(String tenant) {
        return transactionCoordinatorByTenant.computeIfAbsent(tenant, this::createAndBootTransactionCoordinator);
    }

    public ReplicaManager getReplicaManager() {
        return replicaManager;
    }

    @Override
    public String protocolName() {
        return PROTOCOL_NAME;
    }

    @Override
    public boolean accept(String protocol) {
        return PROTOCOL_NAME.equalsIgnoreCase(protocol);
    }

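    /**
     * Builds the KafkaServiceConfiguration from the broker configuration, validates the
     * allowed namespaces, sets up the stats provider and the response scheduler, and
     * wires up the transaction entry filter when the transaction coordinator is enabled.
     */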
    @Override
    public void initialize(ServiceConfiguration conf) throws Exception {
        // init config
        if (conf instanceof KafkaServiceConfiguration) {
            // in unit tests, the conf passed in is already a KafkaServiceConfiguration
            kafkaConfig = (KafkaServiceConfiguration) conf;
        } else {
            // when loaded by the PulsarService as a NAR, `conf` is a plain ServiceConfiguration
            kafkaConfig = ConfigurationUtils.create(conf.getProperties(), KafkaServiceConfiguration.class);

            // Some of the config values in the raw properties may not be up to date,
            // so fetch the latest values from conf itself.
            kafkaConfig.setAdvertisedAddress(conf.getAdvertisedAddress());
            kafkaConfig.setBindAddress(conf.getBindAddress());
        }

        // Validate the namespaces
        for (String fullNamespace : kafkaConfig.getKopAllowedNamespaces()) {
            final String[] tokens = fullNamespace.split("/");
            if (tokens.length != 2) {
                throw new IllegalArgumentException(
                        "Invalid namespace '" + fullNamespace + "' in kopAllowedNamespaces config");
            }
            NamespaceName.validateNamespaceName(
                    tokens[0].replace(KafkaServiceConfiguration.TENANT_PLACEHOLDER, kafkaConfig.getKafkaTenant()),
                    tokens[1].replace("*", kafkaConfig.getKafkaNamespace()));
        }

        statsProvider = new PrometheusMetricsProvider();
        StatsLogger rootStatsLogger = statsProvider.getStatsLogger("");
        requestStats = new RequestStats(rootStatsLogger.scope(SERVER_SCOPE));
        sendResponseScheduler = OrderedScheduler.newSchedulerBuilder()
                .name("send-response")
                .numThreads(kafkaConfig.getNumSendKafkaResponseThreads())
                .build();

        // Check if `kafka_transaction_entry_filter` is configured when the transaction coordinator is enabled;
        // if not, configure it.
        if (kafkaConfig.isKafkaTransactionCoordinatorEnabled()) {
            if (!Objects.equals(kafkaConfig.getEntryFiltersDirectory(), kafkaConfig.getProtocolHandlerDirectory())) {
                kafkaConfig.setEntryFiltersDirectory(kafkaConfig.getProtocolHandlerDirectory());
                log.info("Set entryFiltersDirectory to {}", kafkaConfig.getEntryFiltersDirectory());
            }

            if (!kafkaConfig.getEntryFilterNames().contains("kafka_transaction_entry_filter")) {
                kafkaConfig.getEntryFilterNames().add("kafka_transaction_entry_filter");
                log.info("Add `kafka_transaction_entry_filter` to entryFilterNames");
            }
        }
    }

    // This method is called after initialize
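    // The value returned here is stored by Pulsar as this broker's protocol data; it is
    // the listener string from the kafkaAdvertisedListeners configuration, e.g.
    // "PLAINTEXT://broker-host:9092" (the exact host and port depend on the deployment).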
    @Override
    public String getProtocolDataToAdvertise() {
        String result = kafkaConfig.getKafkaAdvertisedListeners();
        log.info("Advertised addresses for the 'kafka' endpoint: {}", result);
        return result;
    }

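    /**
     * Starts the handler: creates the admin manager, the system topic clients and the
     * lookup service, registers a bundle ownership listener that invalidates caches on
     * topic unload/delete, boots the default coordinators for the metadata tenant when
     * KoP manages the system namespaces, and starts the Prometheus metrics provider.
     */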
    @Override
    public void start(BrokerService service) {
        log.info("Starting KafkaProtocolHandler, kop version is: '{}'", KopVersion.getVersion());
        log.info("Git Revision {}", KopVersion.getGitSha());
        log.info("Built by {} on {} at {}",
            KopVersion.getBuildUser(),
            KopVersion.getBuildHost(),
            KopVersion.getBuildTime());

        brokerService = service;
        final var pulsar = brokerService.pulsar();
        registerConfig();
        PulsarAdmin pulsarAdmin;
        try {
            pulsarAdmin = pulsar.getAdminClient();
            adminManager = new AdminManager(pulsarAdmin, kafkaConfig);
        } catch (PulsarServerException e) {
            log.error("Failed to get pulsarAdmin", e);
            throw new IllegalStateException(e);
        }

        offsetTopicClient = new SystemTopicClient(pulsar, kafkaConfig);
        txnTopicClient = new SystemTopicClient(pulsar, kafkaConfig);
        lookupService = new PulsarClientLookupService(pulsar, kafkaConfig);
        kafkaTopicManagerSharedState = new KafkaTopicManagerSharedState(brokerService);

        // Listener for invalidating the global Broker ownership cache
        bundleListener = new NamespaceBundleOwnershipListenerImpl(brokerService);

        bundleListener.addTopicOwnershipListener(new TopicOwnershipListener() {

            @Override
            public void whenUnload(TopicName topicName) {
                invalidateBundleCache(topicName);
                invalidatePartitionLog(topicName);
            }

            @Override
            public void whenDelete(TopicName topicName) {
                invalidateBundleCache(topicName);
                invalidatePartitionLog(topicName);
            }

            @Override
            public boolean interestedInEvent(NamespaceName namespaceName, EventType event) {
                switch (event) {
                    case UNLOAD:
                    case DELETE:
                        return true;
                }
                return false;
            }

            @Override
            public String name() {
                return "CacheInvalidator";
            }

            private void invalidateBundleCache(TopicName topicName) {
                kafkaTopicManagerSharedState.deReference(topicName.toString());
                if (!topicName.isPartitioned()) {
                    String nonPartitionedTopicName = topicName.getPartition(0).toString();
                    kafkaTopicManagerSharedState.deReference(nonPartitionedTopicName);
                }
            }

            private void invalidatePartitionLog(TopicName topicName) {
                getReplicaManager().removePartitionLog(topicName.toString());
                if (!topicName.isPartitioned()) {
                    getReplicaManager().removePartitionLog(topicName.getPartition(0).toString());
                }
            }
        });
        bundleListener.register();

        recoveryExecutor = OrderedExecutor
                .newBuilder()
                .name("kafka-tx-recovery")
                .numThreads(kafkaConfig.getKafkaTransactionRecoveryNumThreads())
                .build();

        if (kafkaConfig.isKafkaManageSystemNamespaces()) {
            // initialize default Group Coordinator
            getGroupCoordinator(kafkaConfig.getKafkaMetadataTenant());
        }

        // init KopEventManager
        kopEventManager = new KopEventManager(adminManager,
                pulsar.getLocalMetadataStore(),
                requestStats.getStatsLogger(),
                kafkaConfig,
                groupCoordinatorsByTenant);
        kopEventManager.start();

        if (kafkaConfig.isKafkaTransactionCoordinatorEnabled() && kafkaConfig.isKafkaManageSystemNamespaces()) {
            getTransactionCoordinator(kafkaConfig.getKafkaMetadataTenant());
        }

        Configuration conf = new PropertiesConfiguration();
        conf.addProperty("prometheusStatsLatencyRolloverSeconds",
            kafkaConfig.getKopPrometheusStatsLatencyRolloverSeconds());
        conf.addProperty("cluster", kafkaConfig.getClusterName());
        statsProvider.start(conf);
        pulsar.addPrometheusRawMetricsProvider(statsProvider);
        migrationManager = new MigrationManager(kafkaConfig, pulsar);
        initSchemaRegistry(pulsarAdmin);

        if (kafkaConfig.isKafkaTransactionCoordinatorEnabled()
                && kafkaConfig.getKafkaTxnPurgeAbortedTxnIntervalSeconds() > 0) {
            txUpdatedPurgeAbortedTxOffsetsTimeHandle = service.getPulsar().getExecutor().scheduleWithFixedDelay(() ->
                    getReplicaManager().updatePurgeAbortedTxnsOffsets(),
                kafkaConfig.getKafkaTxnPurgeAbortedTxnIntervalSeconds(),
                kafkaConfig.getKafkaTxnPurgeAbortedTxnIntervalSeconds(),
                    TimeUnit.SECONDS);
        }

        final TopicFactory kopTopicFactory = new KopTopicFactory();
        try {
            FieldUtils.writeDeclaredField(brokerService, "topicFactory", kopTopicFactory, true);
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }

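    /**
     * Creates the transaction metadata if missing, boots a TransactionCoordinator for
     * the tenant, and registers a listener that migrates transaction state when the
     * transaction metadata topic partitions are loaded or unloaded.
     */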
    private TransactionCoordinator createAndBootTransactionCoordinator(String tenant) {
        log.info("createAndBootTransactionCoordinator {}", tenant);
        try {
            PulsarAdmin pulsarAdmin = brokerService.getPulsar().getAdminClient();
            MetadataUtils.createTxnMetadataIfMissing(tenant, pulsarAdmin, clusterData, kafkaConfig);
            TransactionCoordinator transactionCoordinator = initTransactionCoordinator(tenant, pulsarAdmin);
            // Listen for transaction metadata topic load/unload events
            final NamespaceName kafkaMetaNs = NamespaceName.get(tenant, kafkaConfig.getKafkaMetadataNamespace());
            final String metadataNamespace = kafkaConfig.getKafkaMetadataNamespace();
            bundleListener.addTopicOwnershipListener(new TopicOwnershipListener() {
                @Override
                public void whenLoad(TopicName topicName) {
                    if (KopTopic.isTransactionMetadataTopicName(topicName.toString(), metadataNamespace)) {
                        transactionCoordinator.handleTxnImmigration(topicName.getPartitionIndex())
                            .exceptionally(e -> {
                                log.error("Failed to load transaction state from {}", topicName, e);
                                return null;
                            });
                    }
                }

                @Override
                public void whenUnload(TopicName topicName) {
                    if (KopTopic.isTransactionMetadataTopicName(topicName.toString(), metadataNamespace)) {
                        transactionCoordinator.handleTxnEmigration(topicName.getPartitionIndex());
                    }
                }

                @Override
                public String name() {
                    return "TransactionStateRecover-" + transactionCoordinator.getTopicPartitionName();
                }

                @Override
                public boolean interestedInEvent(NamespaceName namespaceName, EventType event) {
                    switch (event) {
                        case LOAD:
                        case UNLOAD:
                            return namespaceName.equals(kafkaMetaNs);
                        default:
                            return false;
                    }
                }
            });
            return transactionCoordinator;
        } catch (Exception e) {
            log.error("Initialized transaction coordinator failed.", e);
            throw new IllegalStateException(e);
        }
    }

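    /**
     * Creates the offset metadata if missing, starts a GroupCoordinator for the tenant,
     * and registers a listener that migrates group state when the offset topic
     * partitions are loaded or unloaded.
     */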
    private GroupCoordinator createAndBootGroupCoordinator(String tenant) {
        log.info("createAndBootGroupCoordinator {}", tenant);

        GroupCoordinator groupCoordinator;
        try {
            MetadataUtils.createOffsetMetadataIfMissing(tenant, brokerService.getPulsar().getAdminClient(),
                    getClusterData(), kafkaConfig);

            // init and start group coordinator
            groupCoordinator = startGroupCoordinator(tenant, offsetTopicClient);

            // add a listener for offset topic load/unload events
            final NamespaceName kafkaMetaNs = NamespaceName.get(tenant, kafkaConfig.getKafkaMetadataNamespace());
            final String metadataNamespace = kafkaConfig.getKafkaMetadataNamespace();
            bundleListener.addTopicOwnershipListener(new TopicOwnershipListener() {
                @Override
                public void whenLoad(TopicName topicName) {
                    if (KopTopic.isGroupMetadataTopicName(topicName.toString(), metadataNamespace)) {
                        groupCoordinator.handleGroupImmigration(topicName.getPartitionIndex());
                    }
                }

                @Override
                public void whenUnload(TopicName topicName) {
                    if (KopTopic.isGroupMetadataTopicName(topicName.toString(), metadataNamespace)) {
                        groupCoordinator.handleGroupEmigration(topicName.getPartitionIndex());
                    }
                }

                @Override
                public String name() {
                    return "OffsetAndTopicListener-" + groupCoordinator.getGroupManager().getTopicPartitionName();
                }

                @Override
                public boolean interestedInEvent(NamespaceName namespaceName, EventType event) {
                    switch (event) {
                        case LOAD:
                        case UNLOAD:
                            return namespaceName.equals(kafkaMetaNs);
                        default:
                            return false;
                    }
                }

            });
        } catch (Exception e) {
            log.error("Failed to create offset metadata", e);
            throw new IllegalStateException(e);
        }

        // init kafka namespaces
        try {
            MetadataUtils.createKafkaNamespaceIfMissing(brokerService.getPulsar().getAdminClient(),
                    clusterData, kafkaConfig);
        } catch (Exception e) {
            // no need to throw exception since we can create kafka namespace later
            log.warn("init kafka failed, need to create it manually later", e);
        }

        return groupCoordinator;
    }

    private KafkaChannelInitializer newKafkaChannelInitializer(final EndPoint endPoint) {
        return new KafkaChannelInitializer(
                brokerService.getPulsar(),
                kafkaConfig,
                this,
                replicaManager,
                lookupService,
                adminManager,
                fetchPurgatory,
                endPoint.isTlsEnabled(),
                endPoint,
                kafkaConfig.isSkipMessagesWithoutIndex(),
                requestStats,
                sendResponseScheduler,
                kafkaTopicManagerSharedState,
                kafkaTopicLookupService);
    }

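    // Resolves the ProducerStateManagerSnapshotBuffer through the tenant's transaction
    // coordinator; when multi-tenant metadata is disabled, all tenants share the
    // coordinator of the configured metadata tenant.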
    protected final ProducerStateManagerSnapshotBufferFactory getProducerStateManagerSnapshotBufferByTenant =
        tenant -> {
            TransactionCoordinator transactionCoordinator;
            if (kafkaConfig.isKafkaEnableMultiTenantMetadata()) {
                transactionCoordinator = getTransactionCoordinator(tenant);
            } else {
                transactionCoordinator = getTransactionCoordinator(kafkaConfig.getKafkaMetadataTenant());
            }
            return transactionCoordinator.getProducerStateManagerSnapshotBuffer();
        };

    // This is called after initialize(), with kafkaConfig and brokerService already set.
    @Override
    public Map<InetSocketAddress, ChannelInitializer<SocketChannel>> newChannelInitializers() {
        checkState(kafkaConfig != null);
        checkState(brokerService != null);

        fetchPurgatory = DelayedOperationPurgatory.<DelayedOperation>builder()
                .purgatoryName("fetch")
                .timeoutTimer(SystemTimer.builder().executorName("fetch").build())
                .build();

        kafkaTopicLookupService = new KafkaTopicLookupService(brokerService);

        replicaManager = new ReplicaManager(
                kafkaConfig,
                requestStats,
                Time.SYSTEM,
                brokerService.getEntryFilterProvider().getBrokerEntryFilters(),
                fetchPurgatory,
                kafkaTopicLookupService,
                getProducerStateManagerSnapshotBufferByTenant,
                recoveryExecutor
        );

        try {
            ImmutableMap.Builder<InetSocketAddress, ChannelInitializer<SocketChannel>> builder =
                    ImmutableMap.builder();

            EndPoint.parseListeners(kafkaConfig.getListeners(), kafkaConfig.getKafkaProtocolMap())
                    .forEach((listener, endPoint) ->
                            builder.put(endPoint.getInetAddress(), newKafkaChannelInitializer(endPoint)));

            Optional<ChannelInitializer<SocketChannel>> migrationChannelInitializer = migrationManager.build();
            migrationChannelInitializer.ifPresent(
                    initializer -> builder.put(migrationManager.getAddress(),
                            initializer));

            kafkaConfig.getSchemaRegistryListeners().forEach(namedURI -> {
                try {
                    InetSocketAddress addr =
                        new InetSocketAddress(namedURI.uri().getHost(), namedURI.uri().getPort());
                    Optional<SchemaRegistryChannelInitializer> schemaRegistryChannelInitializer;
                    if ("https".equals(namedURI.uri().getScheme())) {
                        schemaRegistryChannelInitializer = schemaRegistryManager.build((pipeline -> {
                            try {
                                SslContextFactory.Server sslContextFactory =
                                    SSLUtils.createSslContextFactory(kafkaConfig);
                                SslHandler sslHandler = new SslHandler(SSLUtils.createSslEngine(sslContextFactory));
                                pipeline.addLast(TLS_HANDLER, sslHandler);
                            } catch (Exception e) {
                                throw new RuntimeException(e);
                            }
                        }), requestStats.generateSchemaRegistryStats());
                    } else {
                        schemaRegistryChannelInitializer = schemaRegistryManager.build(
                                (pipeline -> {}), requestStats.generateSchemaRegistryStats());
                    }
                    schemaRegistryChannelInitializer.ifPresent(initializer -> {
                        builder.put(addr, initializer);
                    });
                } catch (Exception e) {
                    log.error("KafkaProtocolHandler add schema registry channel failed with ", e);
                }
            });

            channelInitializerMap = builder.build();
            return channelInitializerMap;
        } catch (Exception e) {
            log.error("newChannelInitializers failed", e);
            return null;
        }
    }

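    /**
     * Shuts down the handler: unregisters the bundle listener, stops the purgatories,
     * coordinators and clients, then waits a bounded time for the asynchronous close
     * handles so that broker shutdown is never blocked indefinitely.
     */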
    @Override
    public void close() {
        if (bundleListener != null) {
            bundleListener.close();
        }

        if (txUpdatedPurgeAbortedTxOffsetsTimeHandle != null) {
            txUpdatedPurgeAbortedTxOffsetsTimeHandle.cancel(false);
        }

        if (producePurgatory != null) {
            producePurgatory.shutdown();
        }
        if (fetchPurgatory != null) {
            fetchPurgatory.shutdown();
        }
        groupCoordinatorsByTenant.values().forEach(GroupCoordinator::shutdown);
        if (kopEventManager != null) {
            kopEventManager.close();
        }
        final var closeHandles = new ArrayList<CompletableFuture<?>>();
        if (schemaRegistryManager != null) {
            closeHandles.add(schemaRegistryManager.closeAsync());
        }
        if (kafkaTopicManagerSharedState != null) {
            kafkaTopicManagerSharedState.close();
        }
        if (statsProvider != null) {
            statsProvider.stop();
        }
        if (sendResponseScheduler != null) {
            sendResponseScheduler.shutdown();
        }

        if (adminManager != null) {
            adminManager.shutdown();
        }

        if (offsetTopicClient != null) {
            closeHandles.add(offsetTopicClient.closeAsync());
        }
        if (txnTopicClient != null) {
            if (replicaManager != null) {
                if (kafkaConfig.isKafkaProducerStateStoreInMetadata()) {
                    closeHandles.add(replicaManager.closeAsync());
                    closeHandles.add(txnTopicClient.closeAsync());
                } else {
                    closeHandles.add(replicaManager.closeAsync().thenCompose(__ -> txnTopicClient.closeAsync()));
                }
            } else {
                closeHandles.add(txnTopicClient.closeAsync());
            }
        }
        if (lookupService != null) {
            closeHandles.add(lookupService.closeAsync());
        }
        KerberosLogin.release();

        // do not block the broker forever
        // see https://github.com/apache/pulsar/issues/19579
        try {
            FutureUtil
                    .waitForAll(closeHandles)
                    .get(Math.max(kafkaConfig.getBrokerShutdownTimeoutMs() / 10, 1000),
                            TimeUnit.MILLISECONDS);
            // Shut down the transaction coordinators before the recovery executor,
            // since their shutdown may still use an executor from recoveryExecutor.
            transactionCoordinatorByTenant.values().forEach(TransactionCoordinator::shutdown);
            recoveryExecutor.shutdown();
        } catch (ExecutionException err) {
            log.warn("Error while closing some of the internal PulsarClients", err.getCause());
        } catch (TimeoutException err) {
            log.warn("Could not stop all the internal PulsarClients within the configured timeout");
        } catch (InterruptedException err) {
            Thread.currentThread().interrupt();
            log.warn("Could not stop all the internal PulsarClients");
        }
    }

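    /**
     * Builds the group and offset configurations from kafkaConfig, resolves the number
     * of offset topic partitions via the admin client, and starts a GroupCoordinator
     * with metadata expiration always enabled.
     */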
    @VisibleForTesting
    protected GroupCoordinator startGroupCoordinator(String tenant, SystemTopicClient client) {
        GroupConfig groupConfig = new GroupConfig(
            kafkaConfig.getGroupMinSessionTimeoutMs(),
            kafkaConfig.getGroupMaxSessionTimeoutMs(),
            kafkaConfig.getGroupInitialRebalanceDelayMs()
        );

        String topicName = tenant + "/" + kafkaConfig.getKafkaMetadataNamespace()
                + "/" + Topic.GROUP_METADATA_TOPIC_NAME;

        PulsarAdmin pulsarAdmin;
        int offsetTopicNumPartitions;
        try {
            pulsarAdmin = brokerService.getPulsar().getAdminClient();
            offsetTopicNumPartitions = pulsarAdmin.topics().getPartitionedTopicMetadata(topicName).partitions;
            if (offsetTopicNumPartitions == 0) {
                log.error("Offset topic should not be a non-partitioned topic.");
                throw new IllegalStateException("Offset topic should not be a non-partitioned topic.");
            }
        } catch (PulsarServerException | PulsarAdminException e) {
            log.error("Failed to get the offset topic partition metadata.", e);
            throw new IllegalStateException(e);
        }

        String namespacePrefixForMetadata = MetadataUtils.constructMetadataNamespace(tenant, kafkaConfig);

        OffsetConfig offsetConfig = OffsetConfig.builder()
            .offsetsTopicName(topicName)
            .offsetsTopicNumPartitions(offsetTopicNumPartitions)
            .offsetsTopicCompressionType(CompressionType.valueOf(kafkaConfig.getOffsetsTopicCompressionCodec()))
            .maxMetadataSize(kafkaConfig.getOffsetMetadataMaxSize())
            .offsetsRetentionCheckIntervalMs(kafkaConfig.getOffsetsRetentionCheckIntervalMs())
            .offsetsRetentionMs(TimeUnit.MINUTES.toMillis(kafkaConfig.getOffsetsRetentionMinutes()))
            .offsetCommitTimeoutMs(kafkaConfig.getOffsetCommitTimeoutMs())
            .recoveryOperationTimeoutMs(kafkaConfig.getMetadataRecoveryOperationTimeoutMs())
            .build();

        String kafkaNamespacePrefix = kafkaConfig.getDefaultNamespacePrefix().replaceAll("/", ".");
        GroupCoordinator groupCoordinator = GroupCoordinator.of(
            tenant,
            client,
            groupConfig,
            offsetConfig,
            namespacePrefixForMetadata,
            kafkaNamespacePrefix,
            SystemTimer.builder()
                .executorName("group-coordinator-timer")
                .build(),
            Time.SYSTEM
        );
        // always enable metadata expiration
        groupCoordinator.startup(true);

        return groupCoordinator;
    }

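    /**
     * Resolves the number of transaction log topic partitions via the admin client,
     * builds the TransactionConfig, and starts a TransactionCoordinator for the tenant,
     * blocking until startup completes.
     */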
    public TransactionCoordinator initTransactionCoordinator(String tenant, PulsarAdmin pulsarAdmin) throws Exception {
        String topicName = tenant + "/" + kafkaConfig.getKafkaMetadataNamespace()
            + "/" + Topic.TRANSACTION_STATE_TOPIC_NAME;

        int txnLogTopicNumPartitions;
        try {
            txnLogTopicNumPartitions = pulsarAdmin.topics().getPartitionedTopicMetadata(topicName).partitions;
            if (txnLogTopicNumPartitions == 0) {
                log.error("Txn log topic should not be a non-partitioned topic.");
                throw new IllegalStateException("Txn log topic should not be a non-partitioned topic.");
            }
        } catch (PulsarAdminException e) {
            log.error("Failed to get Txn log topic partition metadata .", e);
            throw new IllegalStateException(e);
        }
        TransactionConfig transactionConfig = TransactionConfig.builder()
                .transactionLogNumPartitions(txnLogTopicNumPartitions)
                .transactionMetadataTopicName(MetadataUtils.constructTxnLogTopicBaseName(tenant, kafkaConfig))
                .transactionProducerIdTopicName(MetadataUtils.constructTxnProducerIdTopicBaseName(tenant, kafkaConfig))
                .transactionProducerStateSnapshotTopicName(MetadataUtils.constructTxProducerStateTopicBaseName(tenant,
                        kafkaConfig))
                .producerStateTopicNumPartitions(kafkaConfig.getKafkaTxnProducerStateTopicNumPartitions())
                .abortTimedOutTransactionsIntervalMs(kafkaConfig.getKafkaTxnAbortTimedOutTransactionCleanupIntervalMs())
                .transactionalIdExpirationMs(kafkaConfig.getKafkaTransactionalIdExpirationMs())
                .removeExpiredTransactionalIdsIntervalMs(
                        kafkaConfig.getKafkaTransactionsRemoveExpiredTransactionalIdCleanupIntervalMs())
                .brokerId(kafkaConfig.getKafkaBrokerId())
                .recoveryOperationTimeoutMs(kafkaConfig.getMetadataRecoveryOperationTimeoutMs())
                .build();

        TransactionCoordinator transactionCoordinator = TransactionCoordinator.of(
            brokerService.getPulsar(),
            tenant,
            kafkaConfig,
            transactionConfig,
            txnTopicClient,
            brokerService.getPulsar().getLocalMetadataStore(),
            lookupService,
            OrderedScheduler
                .newSchedulerBuilder()
                .name("transaction-log-manager-" + tenant)
                .numThreads(1)
                .build(),
            Time.SYSTEM,
            recoveryExecutor);

        transactionCoordinator.startup(kafkaConfig.isKafkaTransactionalIdExpirationEnable()).get();

        return transactionCoordinator;
    }

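    /**
     * Creates the SchemaRegistryManager and, when the schema registry is enabled,
     * provisions its metadata for the configured metadata tenant.
     */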
    private void initSchemaRegistry(PulsarAdmin pulsarAdmin) {
        schemaRegistryManager = new SchemaRegistryManager(
                kafkaConfig, brokerService.getPulsar(), brokerService.getAuthenticationService());
        if (kafkaConfig.isKopSchemaRegistryEnable()) {
            MetadataUtils.createSchemaRegistryMetadataIfMissing(
                    kafkaConfig.getKafkaMetadataTenant(), pulsarAdmin, getClusterData(), kafkaConfig);
        }
    }

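    /**
     * Lazily builds the ClusterData from this broker's web service and broker service
     * URLs.
     */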
    private ClusterData getClusterData() {
        if (clusterData == null) {
            clusterData = ClusterData.builder()
                    .serviceUrl(brokerService.getPulsar().getWebServiceAddress())
                    .serviceUrlTls(brokerService.getPulsar().getWebServiceAddressTls())
                    .brokerServiceUrl(brokerService.getPulsar().getBrokerServiceUrl())
                    .brokerServiceUrlTls(brokerService.getPulsar().getBrokerServiceUrlTls())
                    .build();
        }
        return clusterData;
    }

    private void registerConfig() {
        // TODO: Implement the validator for the configuration
        brokerService.registerCustomDynamicConfiguration(KafkaServiceConfiguration.KOP_ALLOWED_NAMESPACES, __ -> true);
    }
}