ui.topics.partitionReplicaAssignments.ftl
<#-- @ftlvariable name="appUrl" type="com.infobip.kafkistry.webapp.url.AppUrl" -->
<#-- @ftlvariable name="clusterIdentifier" type="java.lang.String" -->
<#-- @ftlvariable name="topicName" type="java.lang.String" -->
<#-- @ftlvariable name="clusterInfo" type="com.infobip.kafkistry.kafka.ClusterInfo" -->
<#-- @ftlvariable name="partitionsAssignments" type="java.util.List" -->
<#-- @ftlvariable name="assignmentsDisbalance" type="com.infobip.kafkistry.service.generator.AssignmentsDisbalance" -->
<#-- @ftlvariable name="partitionChange" type="com.infobip.kafkistry.service.topic.PartitionPropertyChange" -->
<#-- @ftlvariable name="assignmentStatus" type="com.infobip.kafkistry.service.topic.PartitionsAssignmentsStatus" -->
<#-- @ftlvariable name="topicReplicas" type="com.infobip.kafkistry.kafkastate.TopicReplicaInfos" -->
<#-- @ftlvariable name="partitionReAssignments" type="java.util.Map" -->
<#function getReplicaAssignment partitionAssignments brokerId>
<#-- @ftlvariable name="partitionAssignments" type="com.infobip.kafkistry.kafka.PartitionAssignments" -->
<#-- @ftlvariable name="brokerId" type="java.lang.Integer" -->
<#list partitionAssignments.replicasAssignments as replica>
<#if replica.brokerId == brokerId>
<#return replica>
</#if>
</#list>
</#function>
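<#-- Usage sketch (hypothetical call site; real callers live elsewhere in the
     template family). Guard against the missing-value result with a default:
       <#assign replica = (getReplicaAssignment(partitionAssignments, brokerId))!"">
       <#if replica?has_content>...${replica.brokerId}...</#if>
-->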
<#import "../common/util.ftl" as _util>
<#import "../common/infoIcon.ftl" as _info_>
<#import "../sql/sqlQueries.ftl" as sql>
<#assign tinyMode = clusterInfo.brokerIds?size gt 6>
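<#-- tinyMode switches broker cells to a more compact layout once the cluster has
     more than 6 brokers, to keep the wide per-broker table readable. -->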
Partition replica assignments over brokers
<@sql.topicReplicaSizes cluster=clusterInfo.identifier topic=topicName/>
<@sql.topicBrokerReplicaSizes cluster=clusterInfo.identifier topic=topicName/>
<@sql.topicReplicaLeadersCounts cluster=clusterInfo.identifier topic=topicName/>
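<#-- The three sql.* macros (imported from sqlQueries.ftl) presumably render
     shortcuts to pre-built SQL queries for this topic on this cluster: replica
     sizes, per-broker replica sizes, and leader counts. -->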
<#if clusterInfo??>
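<#-- Disbalance summary covers three independent dimensions: replicasDisbalance
     (number of replica migrations needed), leadersDisbalance (number of leader
     re-orders needed), and partitionsPerRackDisbalance (rack-level placement skew). -->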
<#if assignmentsDisbalance??>
<#if assignmentsDisbalance.replicasDisbalance == 0 && assignmentsDisbalance.leadersDisbalance == 0
&& assignmentsDisbalance.partitionsPerRackDisbalance.totalDisbalance == 0>
Partition and leader assignments over brokers are optimal
<#else>
<#if assignmentsDisbalance.replicasDisbalance gt 0>
Partition assignments disbalance
is ${assignmentsDisbalance.replicasDisbalance} (# of needed replica migrations)
| ${assignmentsDisbalance.replicasDisbalancePercent?string["0.##"]}% of all replicas
<#else>
Partition replica placement over brokers is optimal
</#if>
<#if assignmentsDisbalance.leadersDisbalance gt 0>
Partition leaders disbalance
is ${assignmentsDisbalance.leadersDisbalance} (# of needed re-orders with elections)
| ${assignmentsDisbalance.leadersDisbalancePercent?string["0.##"]}% of all partitions
<#else>
Partition leader distribution over brokers is optimal
</#if>
<#if assignmentsDisbalance.partitionsPerRackDisbalance.totalDisbalance gt 0>
<#assign singleRckPart = assignmentsDisbalance.partitionsPerRackDisbalance.singleRackPartitions>
<#if singleRckPart?size gt 0>
Partitions [<#list singleRckPart as p>${p}<#if p?has_next>, </#if></#list>]
have replicas on brokers of the same rack.
</#if>
<#assign disbalancedRckPart = []>
<#list assignmentsDisbalance.partitionsPerRackDisbalance.partitionDisbalance as p, d>
<#if d gt 0 && !singleRckPart?seq_contains(p)>
<#assign disbalancedRckPart += [p]>
</#if>
</#list>
<#if disbalancedRckPart?size gt 0>
Partitions [<#list disbalancedRckPart as p>${p}<#if p?has_next>, </#if></#list>]
have an uneven distribution across brokers on different racks.
</#if>
</#if>
</#if>
</#if>
<#import "../clusters/clusterNodesList.ftl" as brokerBadge>
Brokers
Partition
<#if assignmentStatus.clusterHasRacks> </#if>
<#list (clusterInfo.brokerIds) as brokerId>
<@brokerBadge.clusterNodeId nodeId=brokerId/>
</#list>
<#if topicReplicas??>
${_util.prettyDataSize(topicReplicas.totalSizeBytes)}
<#if assignmentStatus.clusterHasRacks>
Racks
</#if>
<#list clusterInfo.brokerIds as brokerId>
<#assign brokerTotalBytes = (topicReplicas.brokerTotalSizes?api.get(brokerId))!0>
${_util.prettyDataSize(brokerTotalBytes)}
</#list>
</#if>
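<#-- Header and totals are done; one row per partition follows. Note the
     ?api.get(...) calls: FreeMarker hash lookups use String keys, so ?api exposes
     the underlying java.util.Map in order to look up Integer keys such as broker
     ids and partition numbers. -->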
<#list assignmentStatus.partitions as partition>
<#assign rackUsageDisbalanced = assignmentsDisbalance?? && assignmentsDisbalance.partitionsPerRackDisbalance.partitionDisbalance?api.get(partition.partition) gt 0>
<#assign singleRackWarning = assignmentStatus.clusterHasRacks && partition.singleRackReplicas>
${partition.partition}
<#if assignmentStatus.clusterHasRacks>
<#if singleRackWarning>
Single rack
</#if>
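<#-- Per-rack replica counts: an unchanged count renders as a plain number, while a
     changed count renders the old value plus a +n or -n delta, making the rack
     impact of a pending assignment change visible at a glance. -->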
<#list partition.rackCounts as rackStatus>
<#assign rackTitle = (rackStatus.oldCount == rackStatus.newCount)?then(
"Rack=${rackStatus.rack}, #replicas=${rackStatus.newCount}",
"Rack=${rackStatus.rack}, #replicas-before=${rackStatus.oldCount}, #replicas-after=${rackStatus.newCount}"
)>
<#if rackStatus.oldCount == rackStatus.newCount>
${rackStatus.newCount}
<#else>
<#if rackStatus.newCount gt rackStatus.oldCount>
<#if rackStatus.oldCount == 0>
+${rackStatus.newCount}
<#else>
${rackStatus.oldCount}
+${rackStatus.newCount-rackStatus.oldCount}
</#if>
</#if>
<#if rackStatus.oldCount gt rackStatus.newCount>
<#if rackStatus.newCount == 0>
-${rackStatus.oldCount}
<#else>
${rackStatus.oldCount}
-${rackStatus.oldCount-rackStatus.newCount}
</#if>
</#if>
</#if>
${rackStatus.rack}
</#list>
</#if>
<#assign hasReAssignment = (partitionReAssignments?api.get(partition.partition))??>
<#if hasReAssignment>
<#assign reAssignment = partitionReAssignments?api.get(partition.partition)>
</#if>
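<#-- When Kafka reports an in-progress re-assignment for this partition, its
     addingReplicas/removingReplicas sets drive the out-of-sync badge color and the
     "being added/removed by re-assignment" tooltip notes below. -->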
<#assign brokerReplicas = partition.brokerReplicas>
<#list brokerReplicas as brokerReplica>
<#assign reAssignAdding = hasReAssignment && reAssignment.addingReplicas?seq_contains(brokerReplica.brokerId)>
<#assign reAssignRemoving = hasReAssignment && reAssignment.removingReplicas?seq_contains(brokerReplica.brokerId)>
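<#-- Pick a status badge for this replica cell: Leader / In-sync / Out-of-sync for
     replicas currently on the broker, New / Remove for replicas an assignment
     change would add or drop. The "alert" entry is a Bootstrap-style alert-* CSS
     class used to color the cell. -->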
<#assign data = {"text": "", "alert": ""}>
<#if brokerReplica.currentStatus??>
<#assign replica = brokerReplica.currentStatus>
<#if replica.leader>
<#assign data = {"text": "Leader", "alert": "alert-primary", "explain": "This replica is currently the leader"}>
<#elseif replica.inSyncReplica>
<#assign data = {"text": "In-sync", "alert": "alert-secondary", "explain": "This replica is currently in-sync with the leader of partition"}>
<#elseif reAssignAdding>
<#assign data = {"text": "Out-of-sync", "alert": "alert-warning", "explain": "This replica is being added by re-assignment and currently out-of-sync"}>
<#else>
<#assign data = {"text": "Out-of-sync", "alert": "alert-danger", "explain": "This replica is currently not in sync wioth the leader of the partition"}>
#if>
<#elseif brokerReplica.added>
<#assign data = {"text": "New", "alert": "alert-success", "explain": "This replica will be added"}>
<#elseif brokerReplica.removed>
<#assign data = {"text": "Remove", "alert": "alert-warning", "explain": "This replica will be removed"}>
<#else>
<#assign data = {"text": "", "alert": ""}>
</#if>
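<#-- Leadership markers: +L = will become preferred leader, -L = will stop being
     preferred leader, *L = preferred but not currently leader, L* = leader but not
     preferred. -->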
<#assign leaderData = {"text": "", "class": ""}>
<#if brokerReplica.newLeader>
<#assign leaderData = {"text": "+L", "class": "text-success", "explain": "This replica will become preferred leader"}>
<#elseif brokerReplica.exLeader>
<#assign leaderData = {"text": "-L", "class": "text-danger", "explain": "This replica will stop being preferred leader"}>
<#elseif (brokerReplica.currentStatus.preferredLeader && !brokerReplica.currentStatus.leader)!false>
<#assign leaderData = {"text": "*L", "class": "text-primary", "explain": "This replica is preferred leader but it is not currently a leader"}>
<#elseif (!brokerReplica.currentStatus.preferredLeader && brokerReplica.currentStatus.leader)!false>
<#assign leaderData = {"text": "L*", "class": "text-secondary", "explain": "This replica is not preferred leader but it is currently a leader"}>
</#if>
<#assign crossed = brokerReplica.removed || reAssignRemoving>
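<#-- Look up per-replica disk state (size on disk, root dir, offset lag) collected
     for this broker/partition pair, when topicReplicas data is available. -->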
<#assign haveReplicaInfo = false>
<#-- @ftlvariable name="replicaInfo" type="com.infobip.kafkistry.kafka.TopicPartitionReplica" -->
<#if topicReplicas??>
<#assign b = brokerReplica.brokerId>
<#assign p = partition.partition>
<#if (topicReplicas.brokerPartitionReplicas?api.get(b)?api.get(p))??>
<#assign replicaInfo = topicReplicas.brokerPartitionReplicas?api.get(b)?api.get(p)>
<#assign haveReplicaInfo = true>
</#if>
</#if>
<#if data["text"] != "">
<#assign tooltip>
<#if (data["explain"])??>
- ${data["explain"]}
</#if>
<#if (leaderData["explain"])??>
- ${leaderData["explain"]}
</#if>
<#if brokerReplica.removed>
- This replica will be removed
</#if>
<#if 0 <= brokerReplica.rank>
-
<#if brokerReplica.rank == 0>1st
<#elseif brokerReplica.rank == 1>2nd
<#elseif brokerReplica.rank == 2>3rd
<#else>${brokerReplica.rank+1}th
</#if>
priority preferred replica
</#if>
<#if reAssignAdding>
- This replica is being added by re-assignment
</#if>
<#if reAssignRemoving>
- This replica is being removed by re-assignment
</#if>
<#if haveReplicaInfo>
<#if replicaInfo.future>
- This replica is a 'future' assignment, meaning it will
become part of the replica set after the re-assignment
sync finishes
</#if>
- Size on disk:
${_util.prettyDataSize(replicaInfo.sizeBytes)}
- Root dir:
${replicaInfo.rootDir}
<#if !(brokerReplica.currentStatus??) || brokerReplica.currentStatus.leader>
-
Lag diff between HW (high watermark) and LEO (log end offset):
<#if replicaInfo.offsetLag gt 0>
Lag: ${_util.prettyNumber(replicaInfo.offsetLag)}
<#else>
No lag
</#if>
</#if>
</#if>
-
Broker rack:
<#if brokerReplica.rack??>
${brokerReplica.rack}
<#else>
(NULL)
</#if>
</#assign>
<@_info_.icon tooltip=tooltip/>
</#if>
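<#-- brokerReplica.rank is the replica's position in the preferred replica list
     (rank 0 = preferred leader); it is rendered as [n] next to the badge. -->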
<#if 0 <= brokerReplica.rank>
[${brokerReplica.rank?c}]
</#if>
<#if tinyMode>
</#if>
${data["text"]}
${leaderData["text"]}
<#if haveReplicaInfo>
${_util.prettyDataSize(replicaInfo.sizeBytes)}
<#if replicaInfo.future>
future replica
</#if>
</#if>
</#list>
</#list>
<#else>
Cluster is unreachable, can't show assignments
</#if>