// com.infobip.kafkistry.kafka.ops.ResetConsumerGroupOps.kt Maven / Gradle / Ivy
package com.infobip.kafkistry.kafka.ops
import com.infobip.kafkistry.kafka.*
import com.infobip.kafkistry.model.ConsumerGroupId
import com.infobip.kafkistry.model.TopicName
import com.infobip.kafkistry.service.KafkaClusterManagementException
import org.apache.kafka.clients.admin.*
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import java.time.Duration
import java.util.concurrent.CompletableFuture
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
class ResetConsumerGroupOps(
clientCtx: ClientCtx,
private val consumerGroupOps: ConsumerGroupOps,
private val topicOffsetsOps: TopicOffsetsOps,
private val consumerSupplier: ClientFactory.ConsumerSupplier,
) : BaseOps(clientCtx) {
/**
 * Builds and returns an async pipeline that resets committed offsets of consumer group
 * [groupId] according to the requested [reset] specification.
 *
 * Pipeline stages: fetch topic offsets + group description concurrently, refuse to proceed
 * unless the group is inactive, resolve which partition gets which seek, compute target
 * offsets, clamp them to the topics' actual begin/end bounds, commit them, then build a
 * result describing the change.
 *
 * NOTE(review): generic type arguments appear stripped by extraction throughout this file
 * (e.g. the raw `CompletableFuture` return type and raw `Map` below) — confirm against
 * the original source before compiling.
 *
 * @throws KafkaClusterManagementException if the reset selects no topic/partitions at all
 */
fun resetConsumerGroup(
groupId: ConsumerGroupId,
reset: GroupOffsetsReset
): CompletableFuture {
//a topic entry targets partitions when partitions == null (meaning "all") or lists some explicitly
val hasPartitionsToReset = reset.topics.any { it.partitions == null || it.partitions.isNotEmpty() }
if (!hasPartitionsToReset) {
throw KafkaClusterManagementException("Can't perform reset, no topic/partitions selected")
}
//begin/end offsets of all involved topics, fetched concurrently with the group description
val topicsOffsets = topicOffsetsOps.topicsOffsets(reset.topics.map { it.topic })
val consumerGroupFuture = consumerGroupOps.consumerGroup(groupId)
return with(ResetConsumerGroupCtx(groupId, reset)) {
//lazy so the (expensive) consumer-based lookup runs at most once, on first access
val currentGroupOffsets: Map by lazy { currentGroupOffsets() }
CompletableFuture.allOf(topicsOffsets, consumerGroupFuture)
.thenApply { checkGroupState(consumerGroupFuture.get()) }
.thenApply { resolveTopicPartitionSeeks(topicsOffsets.get()) }
.thenCompose { resolveTargetOffsets(it, currentGroupOffsets) }
.thenApply { ensureTargetOffsetsWithinBounds(it, topicsOffsets.get()) }
.thenCompose { targetOffsets -> doResetConsumerGroup(targetOffsets).thenApply { targetOffsets } }
.thenApply { targetOffsets ->
constructResult(currentGroupOffsets, targetOffsets, consumerGroupFuture.get())
}
}
}
private inner class ResetConsumerGroupCtx(
val groupId: ConsumerGroupId,
val reset: GroupOffsetsReset,
) {
/**
 * Verifies the consumer group is inactive before an offset reset is attempted.
 *
 * Offsets may only be reset while no members are consuming, i.e. the group status
 * is EMPTY, DEAD or UNKNOWN.
 *
 * @throws KafkaClusterManagementException when the group is in any other (active) state
 */
fun checkGroupState(consumerGroup: ConsumerGroup) {
    val status = consumerGroup.status
    val groupIsInactive = status == ConsumerGroupStatus.EMPTY ||
            status == ConsumerGroupStatus.DEAD ||
            status == ConsumerGroupStatus.UNKNOWN
    if (!groupIsInactive) {
        throw KafkaClusterManagementException(
            "Aborting reset to consumer group's '$groupId' offset(s) because it need to be inactive, " +
                    "current state: " + consumerGroup.status
        )
    }
}
/**
 * Determines the group's currently committed offset for each partition assigned to a
 * short-lived KafkaConsumer that joins as [groupId] (auto-commit disabled).
 *
 * The consumer must poll for the group coordinator to assign partitions, but polling may
 * also consume records and advance the position. To compensate, every record consumed
 * during the warm-up polls is counted per partition and subtracted from the consumer's
 * final position, yielding the position as it was before polling.
 *
 * NOTE(review): generic parameters look stripped by extraction (raw `Map` return,
 * raw `MutableCollection?` listener args) — presumably Map<TopicPartition, Long>;
 * confirm against the original source.
 */
fun currentGroupOffsets(): Map {
return consumerSupplier.createNewConsumer { props ->
props[ConsumerConfig.GROUP_ID_CONFIG] = groupId
props[ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG] = "false"
}.use { consumer ->
//latch released by the rebalance callback once partitions are assigned
val subscribeLatch = CountDownLatch(1)
consumer.subscribe(reset.topics.map { it.topic }, object : ConsumerRebalanceListener {
override fun onPartitionsAssigned(partitions: MutableCollection?) =
subscribeLatch.countDown()
override fun onPartitionsRevoked(partitions: MutableCollection?) = Unit
})
//do some polling which is needed for KafkaConsumer to assign offsets
val consumedRecordsCounts = sequence {
var remainingAttempts = 10   //bounded retries so an unassignable group can't spin forever
while (remainingAttempts > 0) {
consumer.poll(Duration.ofSeconds(1)).also { yieldAll(it) }
val subscribed = subscribeLatch.await(1, TimeUnit.SECONDS)
if (subscribed) {
break
}
remainingAttempts--
}
}.groupingBy { TopicPartition(it.topic(), it.partition()) }.eachCount()
//position minus records accidentally consumed above == offset before we joined
consumer.assignment().associateWith { topicPartition ->
val currentOffset = consumer.position(topicPartition, readTimeoutDuration())
val correction = consumedRecordsCounts[topicPartition] ?: 0
currentOffset - correction
}
}
}
/**
 * Expands the reset request into a per-partition seek map.
 *
 * For each requested topic: when its `partitions` is null, every existing partition of
 * the topic (taken from [topicsOffsets]) gets the reset-wide default seek; when
 * partitions are listed explicitly, each entry may carry its own seek, falling back to
 * the reset-wide default.
 *
 * NOTE(review): generic parameters look stripped by extraction (`Map>` parameter,
 * raw `Map` return) — confirm against the original source.
 *
 * @throws KafkaClusterManagementException when a requested topic has no fetched offsets
 *         or a requested partition does not exist
 */
fun resolveTopicPartitionSeeks(
topicsOffsets: Map>
): Map {
return reset.topics
.associateBy { it.topic }
.mapValues { (topic, topicSeek) ->
//known partitions of this topic, from the previously fetched offsets
val topicPartitions = topicsOffsets[topic]?.keys
?: throw KafkaClusterManagementException("Did not get response offsets for topic '$topic'")
when (topicSeek.partitions) {
null -> topicPartitions.associate { TopicPartition(topic, it) to reset.seek }
else -> topicSeek.partitions.associate {
val topicPartition = TopicPartition(topic, it.partition)
if (it.partition !in topicPartitions) {
throw KafkaClusterManagementException("$topicPartition does not exist, can't perform offset reset")
}
//per-partition seek overrides the reset-wide default
topicPartition to (it.seek ?: reset.seek)
}
}
}
//flatten per-topic maps into a single TopicPartition -> seek map
.flatMap { (_, partitionSeeks) ->
partitionSeeks.map { it.toPair() }
}
.associate { it }
}
fun resolveTargetOffsets(
topicPartitionSeeks: Map,
currentGroupOffsets: Map
): CompletableFuture
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy