/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.FSProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
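/**
 * A builder for creating immutable {@link ClusterMetrics} instances, plus helpers for converting
 * between {@link ClusterMetrics} and the protobuf {@link ClusterStatusProtos.ClusterStatus}
 * representation.
 */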
@InterfaceAudience.Private
public final class ClusterMetricsBuilder {
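/**
 * Convert the given ClusterMetrics to its protobuf ClusterStatus representation.
 * @param metrics the cluster metrics to convert
 * @return the protobuf ClusterStatus
 */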
public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) {
ClusterStatusProtos.ClusterStatus.Builder builder
= ClusterStatusProtos.ClusterStatus.newBuilder()
.addAllBackupMasters(metrics.getBackupMasterNames().stream()
.map(ProtobufUtil::toServerName).collect(Collectors.toList()))
.addAllDeadServers(metrics.getDeadServerNames().stream()
.map(ProtobufUtil::toServerName).collect(Collectors.toList()))
.addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream()
.map(s -> ClusterStatusProtos.LiveServerInfo
.newBuilder()
.setServer(ProtobufUtil.toServerName(s.getKey()))
.setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue()))
.build())
.collect(Collectors.toList()))
.addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream()
.map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
.collect(Collectors.toList()))
.addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream()
.map(r -> ClusterStatusProtos.RegionInTransition
.newBuilder()
.setSpec(HBaseProtos.RegionSpecifier
.newBuilder()
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
.setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName()))
.build())
.setRegionState(r.convert())
.build())
.collect(Collectors.toList()))
.setMasterInfoPort(metrics.getMasterInfoPort());
if (metrics.getMasterName() != null) {
builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName()));
}
if (metrics.getBalancerOn() != null) {
builder.setBalancerOn(metrics.getBalancerOn());
}
if (metrics.getClusterId() != null) {
builder.setClusterId(new ClusterId(metrics.getClusterId()).convert());
}
if (metrics.getHBaseVersion() != null) {
builder.setHbaseVersion(
FSProtos.HBaseVersionFileContent.newBuilder()
.setVersion(metrics.getHBaseVersion()));
}
return builder.build();
}
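/**
 * Convert a protobuf ClusterStatus back to a ClusterMetrics.
 * @param proto the protobuf ClusterStatus
 * @return the converted ClusterMetrics
 */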
public static ClusterMetrics toClusterMetrics(
ClusterStatusProtos.ClusterStatus proto) {
ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
builder.setLiveServerMetrics(proto.getLiveServersList().stream()
.collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()),
ServerMetricsBuilder::toServerMetrics)))
.setDeadServerNames(proto.getDeadServersList().stream()
.map(ProtobufUtil::toServerName)
.collect(Collectors.toList()))
.setBackerMasterNames(proto.getBackupMastersList().stream()
.map(ProtobufUtil::toServerName)
.collect(Collectors.toList()))
.setRegionsInTransition(proto.getRegionsInTransitionList().stream()
.map(ClusterStatusProtos.RegionInTransition::getRegionState)
.map(RegionState::convert)
.collect(Collectors.toList()))
.setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream()
.map(HBaseProtos.Coprocessor::getName)
.collect(Collectors.toList()));
if (proto.hasClusterId()) {
builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
}
if (proto.hasHbaseVersion()) {
builder.setHBaseVersion(proto.getHbaseVersion().getVersion());
}
if (proto.hasMaster()) {
builder.setMasterName(ProtobufUtil.toServerName(proto.getMaster()));
}
if (proto.hasBalancerOn()) {
builder.setBalancerOn(proto.getBalancerOn());
}
if (proto.hasMasterInfoPort()) {
builder.setMasterInfoPort(proto.getMasterInfoPort());
}
return builder.build();
}
/**
* Convert ClusterStatusProtos.Option to ClusterMetrics.Option
* @param option a ClusterStatusProtos.Option
* @return converted ClusterMetrics.Option
*/
public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) {
switch (option) {
case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION;
case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS;
case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS;
case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION;
case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID;
case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS;
case MASTER: return ClusterMetrics.Option.MASTER;
case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS;
case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
// should not reach here
default: throw new IllegalArgumentException("Invalid option: " + option);
}
}
/**
* Convert ClusterMetrics.Option to ClusterStatusProtos.Option
* @param option a ClusterMetrics.Option
* @return converted ClusterStatusProtos.Option
*/
public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) {
switch (option) {
case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION;
case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS;
case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS;
case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION;
case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID;
case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS;
case MASTER: return ClusterStatusProtos.Option.MASTER;
case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT;
// should not reach here
default: throw new IllegalArgumentException("Invalid option: " + option);
}
}
/**
* Convert a list of ClusterStatusProtos.Option to an enum set of ClusterMetrics.Option
* @param options the pb options
* @return an enum set of ClusterMetrics.Option
*/
public static EnumSet<ClusterMetrics.Option> toOptions(List<ClusterStatusProtos.Option> options) {
return options.stream().map(ClusterMetricsBuilder::toOption)
.collect(Collectors.toCollection(() -> EnumSet.noneOf(ClusterMetrics.Option.class)));
}
/**
* Convert an enum set of ClusterMetrics.Option to a list of ClusterStatusProtos.Option
* @param options the ClusterMetrics options
* @return a list of ClusterStatusProtos.Option
*/
public static List<ClusterStatusProtos.Option> toOptions(EnumSet<ClusterMetrics.Option> options) {
return options.stream().map(ClusterMetricsBuilder::toOption).collect(Collectors.toList());
}
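/**
 * Create a new builder. A minimal usage sketch (the values below are illustrative only):
 * <pre>{@code
 * ClusterMetrics metrics = ClusterMetricsBuilder.newBuilder()
 *     .setHBaseVersion("2.0.0")
 *     .setMasterInfoPort(16010)
 *     .setBalancerOn(true)
 *     .build();
 * }</pre>
 * @return a new ClusterMetricsBuilder
 */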
public static ClusterMetricsBuilder newBuilder() {
return new ClusterMetricsBuilder();
}
@Nullable
private String hbaseVersion;
private List<ServerName> deadServerNames = Collections.emptyList();
private Map<ServerName, ServerMetrics> liveServerMetrics = new TreeMap<>();
@Nullable
private ServerName masterName;
private List<ServerName> backupMasterNames = Collections.emptyList();
private List<RegionState> regionsInTransition = Collections.emptyList();
@Nullable
private String clusterId;
private List<String> masterCoprocessorNames = Collections.emptyList();
@Nullable
private Boolean balancerOn;
private int masterInfoPort;
private ClusterMetricsBuilder() {
}
public ClusterMetricsBuilder setHBaseVersion(String value) {
this.hbaseVersion = value;
return this;
}
public ClusterMetricsBuilder setDeadServerNames(List<ServerName> value) {
this.deadServerNames = value;
return this;
}
public ClusterMetricsBuilder setLiveServerMetrics(Map<ServerName, ServerMetrics> value) {
liveServerMetrics.putAll(value);
return this;
}
public ClusterMetricsBuilder setMasterName(ServerName value) {
this.masterName = value;
return this;
}
public ClusterMetricsBuilder setBackerMasterNames(List<ServerName> value) {
this.backupMasterNames = value;
return this;
}
public ClusterMetricsBuilder setRegionsInTransition(List<RegionState> value) {
this.regionsInTransition = value;
return this;
}
public ClusterMetricsBuilder setClusterId(String value) {
this.clusterId = value;
return this;
}
public ClusterMetricsBuilder setMasterCoprocessorNames(List<String> value) {
this.masterCoprocessorNames = value;
return this;
}
public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) {
this.balancerOn = value;
return this;
}
public ClusterMetricsBuilder setMasterInfoPort(int value) {
this.masterInfoPort = value;
return this;
}
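/**
 * @return an immutable {@link ClusterMetrics} backed by the values set on this builder
 */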
public ClusterMetrics build() {
return new ClusterMetricsImpl(
hbaseVersion,
deadServerNames,
liveServerMetrics,
masterName,
backupMasterNames,
regionsInTransition,
clusterId,
masterCoprocessorNames,
balancerOn,
masterInfoPort);
}
private static class ClusterMetricsImpl implements ClusterMetrics {
@Nullable
private final String hbaseVersion;
private final List<ServerName> deadServerNames;
private final Map<ServerName, ServerMetrics> liveServerMetrics;
@Nullable
private final ServerName masterName;
private final List<ServerName> backupMasterNames;
private final List<RegionState> regionsInTransition;
@Nullable
private final String clusterId;
private final List<String> masterCoprocessorNames;
@Nullable
private final Boolean balancerOn;
private final int masterInfoPort;
ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
Map<ServerName, ServerMetrics> liveServerMetrics,
ServerName masterName,
List<ServerName> backupMasterNames,
List<RegionState> regionsInTransition,
String clusterId,
List<String> masterCoprocessorNames,
Boolean balancerOn,
int masterInfoPort) {
this.hbaseVersion = hbaseVersion;
this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
this.masterName = masterName;
this.backupMasterNames = Preconditions.checkNotNull(backupMasterNames);
this.regionsInTransition = Preconditions.checkNotNull(regionsInTransition);
this.clusterId = clusterId;
this.masterCoprocessorNames = Preconditions.checkNotNull(masterCoprocessorNames);
this.balancerOn = balancerOn;
this.masterInfoPort = masterInfoPort;
}
@Override
public String getHBaseVersion() {
return hbaseVersion;
}
@Override
public List<ServerName> getDeadServerNames() {
return Collections.unmodifiableList(deadServerNames);
}
@Override
public Map<ServerName, ServerMetrics> getLiveServerMetrics() {
return Collections.unmodifiableMap(liveServerMetrics);
}
@Override
public ServerName getMasterName() {
return masterName;
}
@Override
public List<ServerName> getBackupMasterNames() {
return Collections.unmodifiableList(backupMasterNames);
}
@Override
public List<RegionState> getRegionStatesInTransition() {
return Collections.unmodifiableList(regionsInTransition);
}
@Override
public String getClusterId() {
return clusterId;
}
@Override
public List<String> getMasterCoprocessorNames() {
return Collections.unmodifiableList(masterCoprocessorNames);
}
@Override
public Boolean getBalancerOn() {
return balancerOn;
}
@Override
public int getMasterInfoPort() {
return masterInfoPort;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(1024);
sb.append("Master: " + getMasterName());
int backupMastersSize = getBackupMasterNames().size();
sb.append("\nNumber of backup masters: " + backupMastersSize);
if (backupMastersSize > 0) {
for (ServerName serverName: getBackupMasterNames()) {
sb.append("\n " + serverName);
}
}
int serversSize = getLiveServerMetrics().size();
sb.append("\nNumber of live region servers: " + serversSize);
if (serversSize > 0) {
for (ServerName serverName : getLiveServerMetrics().keySet()) {
sb.append("\n " + serverName.getServerName());
}
}
int deadServerSize = getDeadServerNames().size();
sb.append("\nNumber of dead region servers: " + deadServerSize);
if (deadServerSize > 0) {
for (ServerName serverName : getDeadServerNames()) {
sb.append("\n " + serverName);
}
}
sb.append("\nAverage load: " + getAverageLoad());
sb.append("\nNumber of requests: " + getRequestCount());
sb.append("\nNumber of regions: " + getRegionCount());
int ritSize = getRegionStatesInTransition().size();
sb.append("\nNumber of regions in transition: " + ritSize);
if (ritSize > 0) {
for (RegionState state : getRegionStatesInTransition()) {
sb.append("\n " + state.toDescriptiveString());
}
}
return sb.toString();
}
}
}