kafka.server.DynamicBrokerConfig.scala

/**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * The ASF licenses this file to You under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
  * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */

package kafka.server

import java.util
import java.util.{Collections, Properties}
import java.util.concurrent.locks.ReentrantReadWriteLock

import kafka.cluster.EndPoint
import kafka.log.{LogCleaner, LogConfig, LogManager}
import kafka.server.DynamicBrokerConfig._
import kafka.utils.{CoreUtils, Logging, PasswordEncoder}
import kafka.zk.{AdminZkClient, KafkaZkClient}
import org.apache.kafka.common.Reconfigurable
import org.apache.kafka.common.config.{ConfigDef, ConfigException, SslConfigs}
import org.apache.kafka.common.metrics.MetricsReporter
import org.apache.kafka.common.config.types.Password
import org.apache.kafka.common.network.{ListenerName, ListenerReconfigurable}
import org.apache.kafka.common.security.authenticator.LoginManager
import org.apache.kafka.common.utils.Utils

import scala.collection._
import scala.collection.JavaConverters._

/**
  * Dynamic broker configurations are stored in ZooKeeper and may be defined at two levels:
  * <ul>
  *   <li>Per-broker configs persisted at <tt>/configs/brokers/{brokerId}</tt>: These can be described/altered
  *       using AdminClient using the resource name brokerId.</li>
  *   <li>Cluster-wide defaults persisted at <tt>/configs/brokers/&lt;default&gt;</tt>: These can be described/altered
  *       using AdminClient using an empty resource name.</li>
  * </ul>
  * The order of precedence for broker configs is:
  * <ol>
  *   <li>DYNAMIC_BROKER_CONFIG: stored in ZK at /configs/brokers/{brokerId}</li>
  *   <li>DYNAMIC_DEFAULT_BROKER_CONFIG: stored in ZK at /configs/brokers/&lt;default&gt;</li>
  *   <li>STATIC_BROKER_CONFIG: properties that broker is started up with, typically from server.properties file</li>
  *   <li>DEFAULT_CONFIG: Default configs defined in KafkaConfig</li>
  * </ol>
  * Log configs use topic config overrides if defined and fallback to broker defaults using the order of precedence above.
  * Topic config overrides may use a different config name from the default broker config.
  * See [[kafka.log.LogConfig#TopicConfigSynonyms]] for the mapping.
  * <p>
  * AdminClient returns all config synonyms in the order of precedence when configs are described with
  * <code>includeSynonyms</code>. In addition to configs that may be defined with the same name at different levels,
  * some configs have additional synonyms.
  * </p>
  * <ul>
  *   <li>Listener configs may be defined using the prefix <tt>listener.name.{listenerName}.{configName}</tt>. These may be
  *       configured as dynamic or static broker configs. Listener configs have higher precedence than the base configs
  *       that don't specify the listener name. Listeners without a listener config use the base config. Base configs
  *       may be defined only as STATIC_BROKER_CONFIG or DEFAULT_CONFIG and cannot be updated dynamically.</li>
  *   <li>Some configs may be defined using multiple properties. For example, <tt>log.roll.ms</tt> and
  *       <tt>log.roll.hours</tt> refer to the same config that may be defined in milliseconds or hours. The order of
  *       precedence of these synonyms is described in the docs of these configs in [[kafka.server.KafkaConfig]].</li>
  * </ul>
  */
object DynamicBrokerConfig {

  private[server] val DynamicSecurityConfigs = SslConfigs.RECONFIGURABLE_CONFIGS.asScala

  val AllDynamicConfigs = DynamicSecurityConfigs ++
    LogCleaner.ReconfigurableConfigs ++
    DynamicLogConfig.ReconfigurableConfigs ++
    DynamicThreadPool.ReconfigurableConfigs ++
    Set(KafkaConfig.MetricReporterClassesProp) ++
    DynamicListenerConfig.ReconfigurableConfigs

  private val PerBrokerConfigs = DynamicSecurityConfigs ++
    DynamicListenerConfig.ReconfigurableConfigs
  private val ListenerMechanismConfigs = Set(KafkaConfig.SaslJaasConfigProp)

  val ListenerConfigRegex = """listener\.name\.[^.]*\.(.*)""".r

  private val DynamicPasswordConfigs = {
    val passwordConfigs = KafkaConfig.configKeys.filter(_._2.`type` == ConfigDef.Type.PASSWORD).keySet
    AllDynamicConfigs.intersect(passwordConfigs)
  }

  def isPasswordConfig(name: String): Boolean = DynamicBrokerConfig.DynamicPasswordConfigs.exists(name.endsWith)

  def brokerConfigSynonyms(name: String, matchListenerOverride: Boolean): List[String] = {
    name match {
      case KafkaConfig.LogRollTimeMillisProp | KafkaConfig.LogRollTimeHoursProp =>
        List(KafkaConfig.LogRollTimeMillisProp, KafkaConfig.LogRollTimeHoursProp)
      case KafkaConfig.LogRollTimeJitterMillisProp | KafkaConfig.LogRollTimeJitterHoursProp =>
        List(KafkaConfig.LogRollTimeJitterMillisProp, KafkaConfig.LogRollTimeJitterHoursProp)
      case KafkaConfig.LogFlushIntervalMsProp => // LogFlushSchedulerIntervalMsProp is used as default
        List(KafkaConfig.LogFlushIntervalMsProp, KafkaConfig.LogFlushSchedulerIntervalMsProp)
      case KafkaConfig.LogRetentionTimeMillisProp | KafkaConfig.LogRetentionTimeMinutesProp | KafkaConfig.LogRetentionTimeHoursProp =>
        List(KafkaConfig.LogRetentionTimeMillisProp, KafkaConfig.LogRetentionTimeMinutesProp, KafkaConfig.LogRetentionTimeHoursProp)
      case ListenerConfigRegex(baseName) if matchListenerOverride =>
        // `ListenerMechanismConfigs` are specified as listenerPrefix.mechanism.<configName>
        // and other listener configs are specified as listenerPrefix.<configName>.
        // Add <configName> as a synonym in both cases.
        val mechanismConfig = ListenerMechanismConfigs.find(baseName.endsWith)
        List(name, mechanismConfig.getOrElse(baseName))
      case _ => List(name)
    }
  }

  def validateConfigs(props: Properties, perBrokerConfig: Boolean): Unit = {
    def checkInvalidProps(invalidPropNames: Set[String], errorMessage: String): Unit = {
      if (invalidPropNames.nonEmpty)
        throw new ConfigException(s"$errorMessage: $invalidPropNames")
    }
    checkInvalidProps(nonDynamicConfigs(props), "Cannot update these configs dynamically")
    checkInvalidProps(securityConfigsWithoutListenerPrefix(props),
      "These security configs can be dynamically updated only per-listener using the listener prefix")
    validateConfigTypes(props)
    if (!perBrokerConfig) {
      checkInvalidProps(perBrokerConfigs(props),
        "Cannot update these configs at default cluster level, broker id must be specified")
    }
  }

  private def perBrokerConfigs(props: Properties): Set[String] = {
    val configNames = props.asScala.keySet
    configNames.intersect(PerBrokerConfigs) ++ configNames.filter(ListenerConfigRegex.findFirstIn(_).nonEmpty)
  }

  private def nonDynamicConfigs(props: Properties): Set[String] = {
    props.asScala.keySet.intersect(DynamicConfig.Broker.nonDynamicProps)
  }

  private def securityConfigsWithoutListenerPrefix(props: Properties): Set[String] = {
    DynamicSecurityConfigs.filter(props.containsKey)
  }

  private def validateConfigTypes(props: Properties): Unit = {
    val baseProps = new Properties
    props.asScala.foreach {
      case (ListenerConfigRegex(baseName), v) => baseProps.put(baseName, v)
      case (k, v) => baseProps.put(k, v)
    }
    DynamicConfig.Broker.validate(baseProps)
  }

  private[server] def addDynamicConfigs(configDef: ConfigDef): Unit = {
    KafkaConfig.configKeys.filterKeys(AllDynamicConfigs.contains).values.foreach { config =>
      configDef.define(config.name, config.`type`, config.defaultValue, config.validator,
        config.importance, config.documentation, config.group, config.orderInGroup, config.width,
        config.displayName, config.dependents, config.recommender)
    }
  }

  private[server] def dynamicConfigUpdateModes: util.Map[String, String] = {
    AllDynamicConfigs.map { name =>
      val mode = if (PerBrokerConfigs.contains(name)) "per-broker" else "cluster-wide"
      (name -> mode)
    }.toMap.asJava
  }
}

class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging {

  private[server] val staticBrokerConfigs = ConfigDef.convertToStringMapWithPasswordValues(kafkaConfig.originalsFromThisConfig).asScala
  private[server] val staticDefaultConfigs = ConfigDef.convertToStringMapWithPasswordValues(KafkaConfig.defaultValues.asJava).asScala
  private val dynamicBrokerConfigs = mutable.Map[String, String]()
  private val dynamicDefaultConfigs = mutable.Map[String, String]()
  private val reconfigurables = mutable.Buffer[Reconfigurable]()
  private val brokerReconfigurables = mutable.Buffer[BrokerReconfigurable]()
  private val lock = new ReentrantReadWriteLock
  private var currentConfig = kafkaConfig
  private val dynamicConfigPasswordEncoder = maybeCreatePasswordEncoder(kafkaConfig.passwordEncoderSecret)

  private[server] def initialize(zkClient: KafkaZkClient): Unit = {
    currentConfig = new KafkaConfig(kafkaConfig.props, false, None)
    val adminZkClient = new AdminZkClient(zkClient)
    updateDefaultConfig(adminZkClient.fetchEntityConfig(ConfigType.Broker, ConfigEntityName.Default))
    val props = adminZkClient.fetchEntityConfig(ConfigType.Broker, kafkaConfig.brokerId.toString)
    val brokerConfig = maybeReEncodePasswords(props, adminZkClient)
    updateBrokerConfig(kafkaConfig.brokerId, brokerConfig)
  }

  def addReconfigurables(kafkaServer: KafkaServer): Unit = {
    addBrokerReconfigurable(new DynamicThreadPool(kafkaServer))
    if (kafkaServer.logManager.cleaner != null)
      addBrokerReconfigurable(kafkaServer.logManager.cleaner)
    addReconfigurable(new DynamicLogConfig(kafkaServer.logManager, kafkaServer))
    addReconfigurable(new DynamicMetricsReporters(kafkaConfig.brokerId, kafkaServer))
    addReconfigurable(new DynamicClientQuotaCallback(kafkaConfig.brokerId, kafkaServer))
    addBrokerReconfigurable(new DynamicListenerConfig(kafkaServer))
  }

  def addReconfigurable(reconfigurable: Reconfigurable): Unit = CoreUtils.inWriteLock(lock) {
    verifyReconfigurableConfigs(reconfigurable.reconfigurableConfigs.asScala)
    reconfigurables += reconfigurable
  }

  def addBrokerReconfigurable(reconfigurable: BrokerReconfigurable): Unit = CoreUtils.inWriteLock(lock) {
    verifyReconfigurableConfigs(reconfigurable.reconfigurableConfigs)
    brokerReconfigurables += reconfigurable
  }

  def removeReconfigurable(reconfigurable: Reconfigurable): Unit = CoreUtils.inWriteLock(lock) {
    reconfigurables -= reconfigurable
  }

  private def verifyReconfigurableConfigs(configNames: Set[String]): Unit = CoreUtils.inWriteLock(lock) {
    val nonDynamic = configNames.filter(DynamicConfig.Broker.nonDynamicProps.contains)
    require(nonDynamic.isEmpty, s"Reconfigurable contains non-dynamic configs $nonDynamic")
  }

  // Visibility for testing
  private[server] def currentKafkaConfig: KafkaConfig = CoreUtils.inReadLock(lock) {
    currentConfig
  }

  private[server] def currentDynamicBrokerConfigs: Map[String, String] = CoreUtils.inReadLock(lock) {
    dynamicBrokerConfigs.clone()
  }

  private[server] def currentDynamicDefaultConfigs: Map[String, String] = CoreUtils.inReadLock(lock) {
    dynamicDefaultConfigs.clone()
  }

  private[server] def updateBrokerConfig(brokerId: Int, persistentProps: Properties): Unit = CoreUtils.inWriteLock(lock) {
    try {
      val props = fromPersistentProps(persistentProps, perBrokerConfig = true)
      dynamicBrokerConfigs.clear()
      dynamicBrokerConfigs ++= props.asScala
      updateCurrentConfig()
    } catch {
      case e: Exception => error(s"Per-broker configs of $brokerId could not be applied: $persistentProps", e)
    }
  }

  private[server] def updateDefaultConfig(persistentProps: Properties): Unit = CoreUtils.inWriteLock(lock) {
    try {
      val props = fromPersistentProps(persistentProps, perBrokerConfig = false)
      dynamicDefaultConfigs.clear()
      dynamicDefaultConfigs ++= props.asScala
      updateCurrentConfig()
    } catch {
      case e: Exception => error(s"Cluster default configs could not be applied: $persistentProps", e)
    }
  }

  private def maybeCreatePasswordEncoder(secret: Option[Password]): Option[PasswordEncoder] = {
    secret.map { secret =>
      new PasswordEncoder(secret,
        kafkaConfig.passwordEncoderKeyFactoryAlgorithm,
        kafkaConfig.passwordEncoderCipherAlgorithm,
        kafkaConfig.passwordEncoderKeyLength,
        kafkaConfig.passwordEncoderIterations)
    }
  }

  private def passwordEncoder: PasswordEncoder = {
    dynamicConfigPasswordEncoder.getOrElse(throw new ConfigException("Password encoder secret not configured"))
  }

  private[server] def toPersistentProps(configProps: Properties, perBrokerConfig: Boolean): Properties = {
    val props = configProps.clone().asInstanceOf[Properties]

    def encodePassword(configName: String, value: String): Unit = {
      if (value != null) {
        if (!perBrokerConfig)
          throw new ConfigException("Password config can be defined only at broker level")
        props.setProperty(configName, passwordEncoder.encode(new Password(value)))
      }
    }
    configProps.asScala.filterKeys(isPasswordConfig).foreach { case (name, value) => encodePassword(name, value) }
    props
  }

  private[server] def fromPersistentProps(persistentProps: Properties,
                                          perBrokerConfig: Boolean): Properties = {
    val props = persistentProps.clone().asInstanceOf[Properties]

    // Remove all invalid configs from `props`
    removeInvalidConfigs(props, perBrokerConfig)
    def removeInvalidProps(invalidPropNames: Set[String], errorMessage: String): Unit = {
      if (invalidPropNames.nonEmpty) {
        invalidPropNames.foreach(props.remove)
        error(s"$errorMessage: $invalidPropNames")
      }
    }
    removeInvalidProps(nonDynamicConfigs(props), "Non-dynamic configs configured in ZooKeeper will be ignored")
    removeInvalidProps(securityConfigsWithoutListenerPrefix(props),
      "Security configs can be dynamically updated only using listener prefix, base configs will be ignored")
    if (!perBrokerConfig)
      removeInvalidProps(perBrokerConfigs(props), "Per-broker configs defined at default cluster level will be ignored")

    def decodePassword(configName: String, value: String): Unit = {
      if (value != null) {
        try {
          props.setProperty(configName, passwordEncoder.decode(value).value)
        } catch {
          case e: Exception =>
            error(s"Dynamic password config $configName could not be decoded, ignoring.", e)
            props.remove(configName)
        }
      }
    }

    props.asScala.filterKeys(isPasswordConfig).foreach { case (name, value) => decodePassword(name, value) }
    props
  }

  // If the secret has changed, password.encoder.old.secret contains the old secret that was used
  // to encode the configs in ZK. Decode passwords using the old secret and update ZK with values
  // encoded using the current secret. Ignore any errors during decoding since old secret may not
  // have been removed during broker restart.
  private def maybeReEncodePasswords(persistentProps: Properties, adminZkClient: AdminZkClient): Properties = {
    val props = persistentProps.clone().asInstanceOf[Properties]
    if (props.asScala.keySet.exists(isPasswordConfig)) {
      maybeCreatePasswordEncoder(kafkaConfig.passwordEncoderOldSecret).foreach { passwordDecoder =>
        persistentProps.asScala.filterKeys(isPasswordConfig).foreach { case (configName, value) =>
          if (value != null) {
            val decoded = try {
              Some(passwordDecoder.decode(value).value)
            } catch {
              case _: Exception =>
                debug(s"Dynamic password config $configName could not be decoded using old secret, new secret will be used.")
                None
            }
            decoded.foreach { value => props.put(configName, passwordEncoder.encode(new Password(value))) }
          }
        }
        adminZkClient.changeBrokerConfig(Some(kafkaConfig.brokerId), props)
      }
    }
    props
  }

  private[server] def validate(props: Properties, perBrokerConfig: Boolean): Unit = CoreUtils.inReadLock(lock) {
    validateConfigs(props, perBrokerConfig)
    val newProps = mutable.Map[String, String]()
    newProps ++= staticBrokerConfigs
    if (perBrokerConfig) {
      overrideProps(newProps, dynamicDefaultConfigs)
      overrideProps(newProps, props.asScala)
    } else {
      overrideProps(newProps, props.asScala)
      overrideProps(newProps, dynamicBrokerConfigs)
    }
    processReconfiguration(newProps, validateOnly = true)
  }

  private def removeInvalidConfigs(props: Properties, perBrokerConfig: Boolean): Unit = {
    try {
      validateConfigTypes(props)
      props.asScala
    } catch {
      case e: Exception =>
        val invalidProps = props.asScala.filter { case (k, v) =>
          val props1 = new Properties
          props1.put(k, v)
          try {
            validateConfigTypes(props1)
            false
          } catch {
            case _: Exception => true
          }
        }
        invalidProps.foreach(props.remove)
        val configSource = if (perBrokerConfig) "broker" else "default cluster"
        error(s"Dynamic $configSource config contains invalid values: $invalidProps, these configs will be ignored", e)
    }
  }

  private[server] def maybeReconfigure(reconfigurable: Reconfigurable,
                                       oldConfig: KafkaConfig,
                                       newConfig: util.Map[String, _]): Unit = {
    if (reconfigurable.reconfigurableConfigs.asScala.exists(key => oldConfig.originals.get(key) != newConfig.get(key)))
      reconfigurable.reconfigure(newConfig)
  }

  private def updatedConfigs(newProps: java.util.Map[String, _], currentProps: java.util.Map[_, _]): mutable.Map[String, _] = {
    newProps.asScala.filter {
      case (k, v) => v != currentProps.get(k)
    }
  }

  /**
    * Updates values in `props` with the new values from `propsOverride`. Synonyms of updated configs
    * are removed from `props` to ensure that the config with the higher precedence is applied. For example,
    * if `log.roll.ms` was defined in server.properties and `log.roll.hours` is configured dynamically,
    * `log.roll.hours` from the dynamic configuration will be used and `log.roll.ms` will be removed from
    * `props` (even though `log.roll.hours` is secondary to `log.roll.ms`).
    */
  private def overrideProps(props: mutable.Map[String, String], propsOverride: mutable.Map[String, String]): Unit = {
    propsOverride.foreach { case (k, v) =>
      // Remove synonyms of `k` to ensure the right precedence is applied. But disable `matchListenerOverride`
      // so that base configs corresponding to listener configs are not removed. Base configs should not be removed
      // since they may be used by other listeners. It is ok to retain them in `props` since base configs cannot be
      // dynamically updated and listener-specific configs have the higher precedence.
      brokerConfigSynonyms(k, matchListenerOverride = false).foreach(props.remove)
      props.put(k, v)
    }
  }

  private def updateCurrentConfig(): Unit = {
    val newProps = mutable.Map[String, String]()
    newProps ++= staticBrokerConfigs
    overrideProps(newProps, dynamicDefaultConfigs)
    overrideProps(newProps, dynamicBrokerConfigs)
    val oldConfig = currentConfig
    val (newConfig, brokerReconfigurablesToUpdate) = processReconfiguration(newProps, validateOnly = false)
    if (newConfig ne currentConfig) {
      currentConfig = newConfig
      kafkaConfig.updateCurrentConfig(newConfig)

      // Process BrokerReconfigurable updates after current config is updated
      brokerReconfigurablesToUpdate.foreach(_.reconfigure(oldConfig, newConfig))
    }
  }

  private def processReconfiguration(newProps: Map[String, String], validateOnly: Boolean): (KafkaConfig, List[BrokerReconfigurable]) = {
    val newConfig = new KafkaConfig(newProps.asJava, !validateOnly, None)
    val updatedMap = updatedConfigs(newConfig.originalsFromThisConfig, currentConfig.originals)
    if (updatedMap.nonEmpty) {
      try {
        val customConfigs = new util.HashMap[String, Object](newConfig.originalsFromThisConfig) // non-Kafka configs
        newConfig.valuesFromThisConfig.keySet.asScala.foreach(customConfigs.remove)
        reconfigurables.foreach {
          case listenerReconfigurable: ListenerReconfigurable =>
            val listenerName = listenerReconfigurable.listenerName
            val oldValues = currentConfig.valuesWithPrefixOverride(listenerName.configPrefix)
            val newValues = newConfig.valuesFromThisConfigWithPrefixOverride(listenerName.configPrefix)
            val updatedKeys = updatedConfigs(newValues, oldValues).keySet
            if (needsReconfiguration(listenerReconfigurable.reconfigurableConfigs, updatedKeys))
              processReconfigurable(listenerReconfigurable, updatedKeys, newValues, customConfigs, validateOnly)
          case reconfigurable =>
            if (needsReconfiguration(reconfigurable.reconfigurableConfigs, updatedMap.keySet))
              processReconfigurable(reconfigurable, updatedMap.keySet, newConfig.valuesFromThisConfig, customConfigs, validateOnly)
        }

        // BrokerReconfigurable updates are processed after config is updated.
        // Only do the validation here.
        val brokerReconfigurablesToUpdate = mutable.Buffer[BrokerReconfigurable]()
        brokerReconfigurables.foreach { reconfigurable =>
          if (needsReconfiguration(reconfigurable.reconfigurableConfigs.asJava, updatedMap.keySet)) {
            reconfigurable.validateReconfiguration(newConfig)
            if (!validateOnly)
              brokerReconfigurablesToUpdate += reconfigurable
          }
        }
        (newConfig, brokerReconfigurablesToUpdate.toList)
      } catch {
        case e: Exception =>
          if (!validateOnly)
            error(s"Failed to update broker configuration with configs : ${newConfig.originalsFromThisConfig}", e)
          throw new ConfigException("Invalid dynamic configuration", e)
      }
    } else
      (currentConfig, List.empty)
  }

  private def needsReconfiguration(reconfigurableConfigs: util.Set[String], updatedKeys: Set[String]): Boolean = {
    reconfigurableConfigs.asScala.intersect(updatedKeys).nonEmpty
  }

  private def processReconfigurable(reconfigurable: Reconfigurable,
                                    updatedConfigNames: Set[String],
                                    allNewConfigs: util.Map[String, _],
                                    newCustomConfigs: util.Map[String, Object],
                                    validateOnly: Boolean): Unit = {
    val newConfigs = new util.HashMap[String, Object]
    allNewConfigs.asScala.foreach { case (k, v) => newConfigs.put(k, v.asInstanceOf[AnyRef]) }
    newConfigs.putAll(newCustomConfigs)
    try {
      reconfigurable.validateReconfiguration(newConfigs)
    } catch {
      case e: ConfigException => throw e
      case _: Exception =>
        throw new ConfigException(s"Validation of dynamic config update of $updatedConfigNames failed with class ${reconfigurable.getClass}")
    }
    if (!validateOnly) {
      info(s"Reconfiguring $reconfigurable, updated configs: $updatedConfigNames custom configs: $newCustomConfigs")
      reconfigurable.reconfigure(newConfigs)
    }
  }
}

trait BrokerReconfigurable {

  def reconfigurableConfigs: Set[String]

  def validateReconfiguration(newConfig: KafkaConfig): Unit

  def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit
}

object DynamicLogConfig {
  // Exclude message.format.version for now since we need to check that the version
  // is supported on all brokers in the cluster.
  val ExcludedConfigs = Set(KafkaConfig.LogMessageFormatVersionProp)

  val ReconfigurableConfigs = LogConfig.TopicConfigSynonyms.values.toSet -- ExcludedConfigs
  val KafkaConfigToLogConfigName = LogConfig.TopicConfigSynonyms.map { case (k, v) => (v, k) }
}

class DynamicLogConfig(logManager: LogManager, server: KafkaServer) extends Reconfigurable with Logging {

  override def configure(configs: util.Map[String, _]): Unit = {}

  override def reconfigurableConfigs(): util.Set[String] = {
    DynamicLogConfig.ReconfigurableConfigs.asJava
  }

  override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
    // For update of topic config overrides, only config names and types are validated
    // Names and types have already been validated. For consistency with topic config
    // validation, no additional validation is performed.
  }

  override def reconfigure(configs: util.Map[String, _]): Unit = {
    val currentLogConfig = logManager.currentDefaultConfig
    val origUncleanLeaderElectionEnable = logManager.currentDefaultConfig.uncleanLeaderElectionEnable
    val newBrokerDefaults = new util.HashMap[String, Object](currentLogConfig.originals)
    configs.asScala.filterKeys(DynamicLogConfig.ReconfigurableConfigs.contains).foreach { case (k, v) =>
      if (v != null) {
        DynamicLogConfig.KafkaConfigToLogConfigName.get(k).foreach { configName =>
          newBrokerDefaults.put(configName, v.asInstanceOf[AnyRef])
        }
      }
    }

    logManager.reconfigureDefaultLogConfig(LogConfig(newBrokerDefaults))

    logManager.allLogs.foreach { log =>
      val props = mutable.Map.empty[Any, Any]
      props ++= newBrokerDefaults.asScala
      props ++= log.config.originals.asScala.filterKeys(log.config.overriddenConfigs.contains)
      val logConfig = LogConfig(props.asJava)
      log.updateConfig(newBrokerDefaults.asScala.keySet, logConfig)
    }
    if (logManager.currentDefaultConfig.uncleanLeaderElectionEnable && !origUncleanLeaderElectionEnable) {
      server.kafkaController.enableDefaultUncleanLeaderElection()
    }
  }
}

object DynamicThreadPool {
  val ReconfigurableConfigs = Set(
    KafkaConfig.NumIoThreadsProp,
    KafkaConfig.NumNetworkThreadsProp,
    KafkaConfig.NumReplicaFetchersProp,
    KafkaConfig.NumRecoveryThreadsPerDataDirProp,
    KafkaConfig.BackgroundThreadsProp)
}

class DynamicThreadPool(server: KafkaServer) extends BrokerReconfigurable {

  override def reconfigurableConfigs: Set[String] = {
    DynamicThreadPool.ReconfigurableConfigs
  }

  override def validateReconfiguration(newConfig: KafkaConfig): Unit = {
    newConfig.values.asScala.filterKeys(DynamicThreadPool.ReconfigurableConfigs.contains).foreach { case (k, v) =>
      val newValue = v.asInstanceOf[Int]
      val oldValue = currentValue(k)
      if (newValue != oldValue) {
        val errorMsg = s"Dynamic thread count update validation failed for $k=$v"
        if (newValue <= 0)
          throw new ConfigException(s"$errorMsg, value should be at least 1")
        if (newValue < oldValue / 2)
          throw new ConfigException(s"$errorMsg, value should be at least half the current value $oldValue")
        if (newValue > oldValue * 2)
          throw new ConfigException(s"$errorMsg, value should not be greater than double the current value $oldValue")
      }
    }
  }

  override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = {
    if (newConfig.numIoThreads != oldConfig.numIoThreads)
      server.requestHandlerPool.resizeThreadPool(newConfig.numIoThreads)
    if (newConfig.numNetworkThreads != oldConfig.numNetworkThreads)
      server.socketServer.resizeThreadPool(oldConfig.numNetworkThreads, newConfig.numNetworkThreads)
    if (newConfig.numReplicaFetchers != oldConfig.numReplicaFetchers)
      server.replicaManager.replicaFetcherManager.resizeThreadPool(newConfig.numReplicaFetchers)
    if (newConfig.numRecoveryThreadsPerDataDir != oldConfig.numRecoveryThreadsPerDataDir)
      server.getLogManager.resizeRecoveryThreadPool(newConfig.numRecoveryThreadsPerDataDir)
    if (newConfig.backgroundThreads != oldConfig.backgroundThreads)
      server.kafkaScheduler.resizeThreadPool(newConfig.backgroundThreads)
  }

  private def currentValue(name: String): Int = {
    name match {
      case KafkaConfig.NumIoThreadsProp => server.config.numIoThreads
      case KafkaConfig.NumNetworkThreadsProp => server.config.numNetworkThreads
      case KafkaConfig.NumReplicaFetchersProp => server.config.numReplicaFetchers
      case KafkaConfig.NumRecoveryThreadsPerDataDirProp => server.config.numRecoveryThreadsPerDataDir
      case KafkaConfig.BackgroundThreadsProp => server.config.backgroundThreads
      case n => throw new IllegalStateException(s"Unexpected config $n")
    }
  }
}

class DynamicMetricsReporters(brokerId: Int, server: KafkaServer) extends Reconfigurable {

  private val dynamicConfig = server.config.dynamicConfig
  private val metrics = server.metrics
  private val propsOverride = Map[String, AnyRef](KafkaConfig.BrokerIdProp -> brokerId.toString)
  private val currentReporters = mutable.Map[String, MetricsReporter]()

  createReporters(dynamicConfig.currentKafkaConfig.getList(KafkaConfig.MetricReporterClassesProp),
    Collections.emptyMap[String, Object])

  private[server] def currentMetricsReporters: List[MetricsReporter] = currentReporters.values.toList

  override def configure(configs: util.Map[String, _]): Unit = {}

  override def reconfigurableConfigs(): util.Set[String] = {
    val configs = new util.HashSet[String]()
    configs.add(KafkaConfig.MetricReporterClassesProp)
    currentReporters.values.foreach {
      case reporter: Reconfigurable => configs.addAll(reporter.reconfigurableConfigs)
      case _ =>
    }
    configs
  }

  override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
    val updatedMetricsReporters = metricsReporterClasses(configs)

    // Ensure all the reporter classes can be loaded and have a default constructor
    updatedMetricsReporters.foreach { className =>
      val clazz = Utils.loadClass(className, classOf[MetricsReporter])
      clazz.getConstructor()
    }

    // Validate the new configuration using every reconfigurable reporter instance that is not being deleted
    currentReporters.values.foreach {
      case reporter: Reconfigurable =>
        if (updatedMetricsReporters.contains(reporter.getClass.getName))
          reporter.validateReconfiguration(configs)
      case _ =>
    }
  }

  override def reconfigure(configs: util.Map[String, _]): Unit = {
    val updatedMetricsReporters = metricsReporterClasses(configs)
    val deleted = currentReporters.keySet -- updatedMetricsReporters
    deleted.foreach(removeReporter)
    currentReporters.values.foreach {
      case reporter: Reconfigurable => dynamicConfig.maybeReconfigure(reporter, dynamicConfig.currentKafkaConfig, configs)
      case _ =>
    }
    val added = updatedMetricsReporters -- currentReporters.keySet
    createReporters(added.asJava, configs)
  }

  private def createReporters(reporterClasses: util.List[String],
                              updatedConfigs: util.Map[String, _]): Unit = {
    val props = new util.HashMap[String, AnyRef]
    updatedConfigs.asScala.foreach { case (k, v) => props.put(k, v.asInstanceOf[AnyRef]) }
    propsOverride.foreach { case (k, v) => props.put(k, v) }
    val reporters = dynamicConfig.currentKafkaConfig.getConfiguredInstances(reporterClasses, classOf[MetricsReporter], props)
    reporters.asScala.foreach { reporter =>
      metrics.addReporter(reporter)
      currentReporters += reporter.getClass.getName -> reporter
    }
    server.notifyClusterListeners(reporters.asScala)
  }

  private def removeReporter(className: String): Unit = {
    currentReporters.remove(className).foreach(metrics.removeReporter)
  }

  private def metricsReporterClasses(configs: util.Map[String, _]): mutable.Buffer[String] = {
    configs.get(KafkaConfig.MetricReporterClassesProp).asInstanceOf[util.List[String]].asScala
  }
}

object DynamicListenerConfig {
  val ReconfigurableConfigs = Set(
    // Listener configs
    KafkaConfig.AdvertisedListenersProp,
    KafkaConfig.ListenersProp,
    KafkaConfig.ListenerSecurityProtocolMapProp,

    // SSL configs
    KafkaConfig.PrincipalBuilderClassProp,
    KafkaConfig.SslProtocolProp,
    KafkaConfig.SslProviderProp,
    KafkaConfig.SslCipherSuitesProp,
    KafkaConfig.SslEnabledProtocolsProp,
    KafkaConfig.SslKeystoreTypeProp,
    KafkaConfig.SslKeystoreLocationProp,
    KafkaConfig.SslKeystorePasswordProp,
    KafkaConfig.SslKeyPasswordProp,
    KafkaConfig.SslTruststoreTypeProp,
    KafkaConfig.SslTruststoreLocationProp,
    KafkaConfig.SslTruststorePasswordProp,
    KafkaConfig.SslKeyManagerAlgorithmProp,
    KafkaConfig.SslTrustManagerAlgorithmProp,
    KafkaConfig.SslEndpointIdentificationAlgorithmProp,
    KafkaConfig.SslSecureRandomImplementationProp,
    KafkaConfig.SslClientAuthProp,

    // SASL configs
    KafkaConfig.SaslMechanismInterBrokerProtocolProp,
    KafkaConfig.SaslJaasConfigProp,
    KafkaConfig.SaslEnabledMechanismsProp,
    KafkaConfig.SaslKerberosServiceNameProp,
    KafkaConfig.SaslKerberosKinitCmdProp,
    KafkaConfig.SaslKerberosTicketRenewWindowFactorProp,
    KafkaConfig.SaslKerberosTicketRenewJitterProp,
    KafkaConfig.SaslKerberosMinTimeBeforeReloginProp,
    KafkaConfig.SaslKerberosPrincipalToLocalRulesProp,
    KafkaConfig.SaslLoginRefreshWindowFactorProp,
    KafkaConfig.SaslLoginRefreshWindowJitterProp,
    KafkaConfig.SaslLoginRefreshMinPeriodSecondsProp,
    KafkaConfig.SaslLoginRefreshBufferSecondsProp
  )
}

class DynamicClientQuotaCallback(brokerId: Int, server: KafkaServer) extends Reconfigurable {

  override def configure(configs: util.Map[String, _]): Unit = {}

  override def reconfigurableConfigs(): util.Set[String] = {
    val configs = new util.HashSet[String]()
    server.quotaManagers.clientQuotaCallback.foreach {
      case callback: Reconfigurable => configs.addAll(callback.reconfigurableConfigs)
      case _ =>
    }
    configs
  }

  override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
    server.quotaManagers.clientQuotaCallback.foreach {
      case callback: Reconfigurable => callback.validateReconfiguration(configs)
      case _ =>
    }
  }

  override def reconfigure(configs: util.Map[String, _]): Unit = {
    val config = server.config
    server.quotaManagers.clientQuotaCallback.foreach {
      case callback: Reconfigurable =>
        config.dynamicConfig.maybeReconfigure(callback, config.dynamicConfig.currentKafkaConfig, configs)
        true
      case _ => false
    }
  }
}

class DynamicListenerConfig(server: KafkaServer) extends BrokerReconfigurable with Logging {

  override def reconfigurableConfigs: Set[String] = {
    DynamicListenerConfig.ReconfigurableConfigs
  }

  def validateReconfiguration(newConfig: KafkaConfig): Unit = {
    def immutableListenerConfigs(kafkaConfig: KafkaConfig, prefix: String): Map[String, AnyRef] = {
      newConfig.originals.asScala
        .filterKeys(_.startsWith(prefix))
        .filterKeys(k => !DynamicSecurityConfigs.contains(k))
    }

    val oldConfig = server.config
    val newListeners = listenersToMap(newConfig.listeners)
    val newAdvertisedListeners = listenersToMap(newConfig.advertisedListeners)
    val oldListeners = listenersToMap(oldConfig.listeners)
    if (!newAdvertisedListeners.keySet.subsetOf(newListeners.keySet))
      throw new ConfigException(s"Advertised listeners '$newAdvertisedListeners' must be a subset of listeners '$newListeners'")
    if (!newListeners.keySet.subsetOf(newConfig.listenerSecurityProtocolMap.keySet))
      throw new ConfigException(s"Listeners '$newListeners' must be subset of listener map '${newConfig.listenerSecurityProtocolMap}'")
    newListeners.keySet.intersect(oldListeners.keySet).foreach { listenerName =>
      val prefix = listenerName.configPrefix
      val newListenerProps = immutableListenerConfigs(newConfig, prefix)
      val oldListenerProps = immutableListenerConfigs(oldConfig, prefix)
      if (newListenerProps != oldListenerProps)
        throw new ConfigException(s"Configs cannot be updated dynamically for existing listener $listenerName, " +
          "restart broker or create a new listener for update")
      if (oldConfig.listenerSecurityProtocolMap(listenerName) != newConfig.listenerSecurityProtocolMap(listenerName))
        throw new ConfigException(s"Security protocol cannot be updated for existing listener $listenerName")
    }
    if (!newAdvertisedListeners.contains(newConfig.interBrokerListenerName))
      throw new ConfigException(s"Advertised listener must be specified for inter-broker listener ${newConfig.interBrokerListenerName}")
  }

  def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = {
    val newListeners = newConfig.listeners
    val newListenerMap = listenersToMap(newListeners)
    val oldListeners = oldConfig.listeners
    val oldListenerMap = listenersToMap(oldListeners)
    val listenersRemoved = oldListeners.filterNot(e => newListenerMap.contains(e.listenerName))
    val listenersAdded = newListeners.filterNot(e => oldListenerMap.contains(e.listenerName))

    // Clear SASL login cache to force re-login
    if (listenersAdded.nonEmpty || listenersRemoved.nonEmpty)
      LoginManager.closeAll()

    server.socketServer.removeListeners(listenersRemoved)
    if (listenersAdded.nonEmpty)
      server.socketServer.addListeners(listenersAdded)

    server.kafkaController.updateBrokerInfo(server.createBrokerInfo)
  }

  private def listenersToMap(listeners: Seq[EndPoint]): Map[ListenerName, EndPoint] =
    listeners.map(e => (e.listenerName, e)).toMap

}
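
Usage note (not part of the source file above): the class Scaladoc describes per-broker configs addressed by broker id and cluster-wide defaults addressed by an empty resource name. Below is a minimal sketch of issuing such updates through the public AdminClient API; the bootstrap address, broker id "0", and the choice of log.cleaner.threads are assumptions made for illustration.

import java.util.{Collections, Properties}

import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig, Config, ConfigEntry}
import org.apache.kafka.common.config.ConfigResource

object DynamicConfigUpdateSketch extends App {
  val props = new Properties()
  props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // illustrative address
  val admin = AdminClient.create(props)
  try {
    // Per-broker config: the resource name is the broker id (persisted at /configs/brokers/0).
    val perBroker = new ConfigResource(ConfigResource.Type.BROKER, "0")
    // Cluster-wide default: an empty resource name (persisted at /configs/brokers/<default>).
    val clusterDefault = new ConfigResource(ConfigResource.Type.BROKER, "")
    val update = new Config(Collections.singleton(new ConfigEntry("log.cleaner.threads", "2")))

    // alterConfigs replaces the full set of dynamic configs stored for each resource.
    admin.alterConfigs(Collections.singletonMap(perBroker, update)).all().get()
    admin.alterConfigs(Collections.singletonMap(clusterDefault, update)).all().get()
  } finally admin.close()
}

The same updates can be made with the kafka-configs.sh tool, using --entity-type brokers together with --entity-name 0 for a single broker or --entity-default for the cluster-wide default.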
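Similarly, the listener-prefix rules in the Scaladoc mean that security configs such as SSL keystores are dynamically updatable only with a listener.name prefix and only at the per-broker level. A sketch under the same assumptions follows; the EXTERNAL listener name, keystore path, and password are hypothetical, and per the passwordEncoder logic above, the broker must have password.encoder.secret configured for the password value to be persisted.

import java.util.{Arrays, Collections}

import org.apache.kafka.clients.admin.{AdminClient, Config, ConfigEntry}
import org.apache.kafka.common.config.ConfigResource

object ListenerKeystoreUpdateSketch {
  // Rotates the keystore of a hypothetical EXTERNAL listener on one broker.
  def rotateKeystore(admin: AdminClient, brokerId: String): Unit = {
    val resource = new ConfigResource(ConfigResource.Type.BROKER, brokerId)
    val entries = Arrays.asList(
      new ConfigEntry("listener.name.external.ssl.keystore.location", "/path/to/new/keystore.jks"),
      new ConfigEntry("listener.name.external.ssl.keystore.password", "new-keystore-password"))
    admin.alterConfigs(Collections.singletonMap(resource, new Config(entries))).all().get()
  }
}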



