/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import java.io.Closeable;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
/**
* HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
* this is an HBase-internal class as defined in
* https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
* There are no guarantees for backwards source / binary compatibility and methods or class can
* change or go away without deprecation.
* Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
* an HBaseAdmin directly.
*
* Connection should be an unmanaged connection obtained via
* {@link ConnectionFactory#createConnection(Configuration)}
*
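* <p>A minimal usage sketch (illustrative only; assumes a reachable cluster and a default
* client configuration):
* <pre>{@code
* Configuration conf = HBaseConfiguration.create();
* try (Connection connection = ConnectionFactory.createConnection(conf);
*      Admin admin = connection.getAdmin()) {
*   TableName[] tables = admin.listTableNames();
* }
* }</pre>
*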
* @see ConnectionFactory
* @see Connection
* @see Admin
*/
@InterfaceAudience.Private
public class HBaseAdmin implements Admin {
private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
private ClusterConnection connection;
private final Configuration conf;
private final long pause;
private final int numRetries;
private final int syncWaitTimeout;
private boolean aborted;
private int operationTimeout;
private int rpcTimeout;
private int getProcedureTimeout;
private RpcRetryingCallerFactory rpcCallerFactory;
private RpcControllerFactory rpcControllerFactory;
private NonceGenerator ng;
@Override
public int getOperationTimeout() {
return operationTimeout;
}
HBaseAdmin(ClusterConnection connection) throws IOException {
this.conf = connection.getConfiguration();
this.connection = connection;
// TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
this.syncWaitTimeout = this.conf.getInt(
"hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
this.getProcedureTimeout =
this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min
this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
this.rpcControllerFactory = connection.getRpcControllerFactory();
this.ng = this.connection.getNonceGenerator();
}
@Override
public void abort(String why, Throwable e) {
// Currently does nothing but throw the passed message and exception
this.aborted = true;
throw new RuntimeException(why, e);
}
@Override
public boolean isAborted() {
return this.aborted;
}
@Override
public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
throws IOException {
return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
TimeUnit.MILLISECONDS);
}
@Override
public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
throws IOException {
Boolean abortProcResponse =
executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
getRpcControllerFactory()) {
@Override
protected AbortProcedureResponse rpcCall() throws Exception {
AbortProcedureRequest abortProcRequest =
AbortProcedureRequest.newBuilder().setProcId(procId).build();
return master.abortProcedure(getRpcController(), abortProcRequest);
}
}).getIsProcedureAborted();
return new AbortProcedureFuture(this, procId, abortProcResponse);
}
@Override
public List<TableDescriptor> listTableDescriptors() throws IOException {
return listTableDescriptors((Pattern)null, false);
}
@Override
public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
return listTableDescriptors(pattern, false);
}
@Override
public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
throws IOException {
return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
getRpcControllerFactory()) {
@Override
protected List<TableDescriptor> rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
req));
}
});
}
@Override
public TableDescriptor getDescriptor(TableName tableName)
throws TableNotFoundException, IOException {
return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
operationTimeout, rpcTimeout);
}
@Override
public void modifyTable(TableDescriptor td) throws IOException {
get(modifyTableAsync(td), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
ModifyTableResponse response = executeCallable(
new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected ModifyTableResponse rpcCall() throws Exception {
setPriority(td.getTableName());
ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
td.getTableName(), td, nonceGroup, nonce);
return master.modifyTable(getRpcController(), request);
}
});
return new ModifyTableFuture(this, td.getTableName(), response);
}
@Override
public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
getRpcControllerFactory()) {
@Override
protected List<TableDescriptor> rpcCall() throws Exception {
return master.listTableDescriptorsByNamespace(getRpcController(),
ListTableDescriptorsByNamespaceRequest.newBuilder()
.setNamespaceName(Bytes.toString(name)).build())
.getTableSchemaList()
.stream()
.map(ProtobufUtil::toTableDescriptor)
.collect(Collectors.toList());
}
});
}
@Override
public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
getRpcControllerFactory()) {
@Override
protected List<TableDescriptor> rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
req));
}
});
}
@Override
public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
// TODO: There is no timeout on this controller. Set one!
HBaseRpcController controller = rpcControllerFactory.newController();
return ProtobufUtil.getOnlineRegions(controller, admin);
}
@Override
public List<RegionInfo> getRegions(TableName tableName) throws IOException {
if (TableName.isMetaTableName(tableName)) {
return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
} else {
return MetaTableAccessor.getTableRegions(connection, tableName, true);
}
}
private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
private boolean isAbortInProgress;
public AbortProcedureFuture(
final HBaseAdmin admin,
final Long procId,
final Boolean abortProcResponse) {
super(admin, procId);
this.isAbortInProgress = abortProcResponse;
}
@Override
public Boolean get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
if (!this.isAbortInProgress) {
return false;
}
super.get(timeout, unit);
return true;
}
}
/** @return Connection used by this object. */
@Override
public Connection getConnection() {
return connection;
}
@Override
public boolean tableExists(final TableName tableName) throws IOException {
return executeCallable(new RpcRetryingCallable<Boolean>() {
@Override
protected Boolean rpcCall(int callTimeout) throws Exception {
return MetaTableAccessor.tableExists(connection, tableName);
}
});
}
@Override
public HTableDescriptor[] listTables() throws IOException {
return listTables((Pattern)null, false);
}
@Override
public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
return listTables(pattern, false);
}
@Override
public HTableDescriptor[] listTables(String regex) throws IOException {
return listTables(Pattern.compile(regex), false);
}
@Override
public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
throws IOException {
return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection(),
getRpcControllerFactory()) {
@Override
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
}
});
}
@Override
public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
throws IOException {
return listTables(Pattern.compile(regex), includeSysTables);
}
@Override
public TableName[] listTableNames() throws IOException {
return listTableNames((Pattern)null, false);
}
@Override
public TableName[] listTableNames(Pattern pattern) throws IOException {
return listTableNames(pattern, false);
}
@Override
public TableName[] listTableNames(String regex) throws IOException {
return listTableNames(Pattern.compile(regex), false);
}
@Override
public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
throws IOException {
return executeCallable(new MasterCallable<TableName[]>(getConnection(),
getRpcControllerFactory()) {
@Override
protected TableName[] rpcCall() throws Exception {
GetTableNamesRequest req =
RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
return ProtobufUtil.getTableNameArray(master.getTableNames(getRpcController(), req)
.getTableNamesList());
}
});
}
@Override
public TableName[] listTableNames(final String regex, final boolean includeSysTables)
throws IOException {
return listTableNames(Pattern.compile(regex), includeSysTables);
}
@Override
public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
operationTimeout, rpcTimeout);
}
static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection,
RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
int operationTimeout, int rpcTimeout) throws IOException {
if (tableName == null) return null;
TableDescriptor td =
executeCallable(new MasterCallable<TableDescriptor>(connection, rpcControllerFactory) {
@Override
protected TableDescriptor rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
}
return null;
}
}, rpcCallerFactory, operationTimeout, rpcTimeout);
if (td != null) {
return td;
}
throw new TableNotFoundException(tableName.getNameAsString());
}
/**
* @deprecated since 2.0 version and will be removed in 3.0 version.
* Use {@link #getTableDescriptor(TableName,
* Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)}
*/
@Deprecated
static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection,
RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
int operationTimeout, int rpcTimeout) throws IOException {
if (tableName == null) {
return null;
}
HTableDescriptor htd =
executeCallable(new MasterCallable<HTableDescriptor>(connection, rpcControllerFactory) {
@Override
protected HTableDescriptor rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
return new ImmutableHTableDescriptor(
ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
}
return null;
}
}, rpcCallerFactory, operationTimeout, rpcTimeout);
if (htd != null) {
return new ImmutableHTableDescriptor(htd);
}
throw new TableNotFoundException(tableName.getNameAsString());
}
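/**
* Returns how long to sleep before retry number {@code tries}: the configured client pause
* scaled by the corresponding multiplier in {@link HConstants#RETRY_BACKOFF}, capped at the
* table's last entry.
*/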
private long getPauseTime(int tries) {
int triesCount = tries;
if (triesCount >= HConstants.RETRY_BACKOFF.length) {
triesCount = HConstants.RETRY_BACKOFF.length - 1;
}
return this.pause * HConstants.RETRY_BACKOFF[triesCount];
}
@Override
public void createTable(TableDescriptor desc)
throws IOException {
createTable(desc, null);
}
@Override
public void createTable(TableDescriptor desc, byte [] startKey,
byte [] endKey, int numRegions)
throws IOException {
if(numRegions < 3) {
throw new IllegalArgumentException("Must create at least three regions");
} else if(Bytes.compareTo(startKey, endKey) >= 0) {
throw new IllegalArgumentException("Start key must be smaller than end key");
}
if (numRegions == 3) {
createTable(desc, new byte[][]{startKey, endKey});
return;
}
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
if(splitKeys == null || splitKeys.length != numRegions - 1) {
throw new IllegalArgumentException("Unable to split key range into enough regions");
}
createTable(desc, splitKeys);
}
@Override
public void createTable(final TableDescriptor desc, byte [][] splitKeys)
throws IOException {
get(createTableAsync(desc, splitKeys), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> createTableAsync(final TableDescriptor desc, final byte[][] splitKeys)
throws IOException {
if (desc.getTableName() == null) {
throw new IllegalArgumentException("TableName cannot be null");
}
if (splitKeys != null && splitKeys.length > 0) {
Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
// Verify there are no duplicate split keys
byte[] lastKey = null;
for (byte[] splitKey : splitKeys) {
if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
throw new IllegalArgumentException(
"Empty split key must not be passed in the split keys.");
}
if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
throw new IllegalArgumentException("All split keys must be unique, " +
"found duplicate: " + Bytes.toStringBinary(splitKey) +
", " + Bytes.toStringBinary(lastKey));
}
lastKey = splitKey;
}
}
CreateTableResponse response = executeCallable(
new MasterCallable<CreateTableResponse>(getConnection(), getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected CreateTableResponse rpcCall() throws Exception {
setPriority(desc.getTableName());
CreateTableRequest request = RequestConverter.buildCreateTableRequest(
desc, splitKeys, nonceGroup, nonce);
return master.createTable(getRpcController(), request);
}
});
return new CreateTableFuture(this, desc, splitKeys, response);
}
private static class CreateTableFuture extends TableFuture<Void> {
private final TableDescriptor desc;
private final byte[][] splitKeys;
public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc,
final byte[][] splitKeys, final CreateTableResponse response) {
super(admin, desc.getTableName(),
(response != null && response.hasProcId()) ? response.getProcId() : null);
this.splitKeys = splitKeys;
this.desc = desc;
}
@Override
protected TableDescriptor getTableDescriptor() {
return desc;
}
@Override
public String getOperationType() {
return "CREATE";
}
@Override
protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
waitForTableEnabled(deadlineTs);
waitForAllRegionsOnline(deadlineTs, splitKeys);
return null;
}
}
@Override
public void deleteTable(final TableName tableName) throws IOException {
get(deleteTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> deleteTableAsync(final TableName tableName) throws IOException {
DeleteTableResponse response = executeCallable(
new MasterCallable<DeleteTableResponse>(getConnection(), getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected DeleteTableResponse rpcCall() throws Exception {
setPriority(tableName);
DeleteTableRequest req =
RequestConverter.buildDeleteTableRequest(tableName, nonceGroup, nonce);
return master.deleteTable(getRpcController(), req);
}
});
return new DeleteTableFuture(this, tableName, response);
}
private static class DeleteTableFuture extends TableFuture<Void> {
public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
final DeleteTableResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
}
@Override
public String getOperationType() {
return "DELETE";
}
@Override
protected Void waitOperationResult(final long deadlineTs)
throws IOException, TimeoutException {
waitTableNotFound(deadlineTs);
return null;
}
@Override
protected Void postOperationResult(final Void result, final long deadlineTs)
throws IOException, TimeoutException {
// Delete cached information to prevent clients from using old locations
((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName());
return super.postOperationResult(result, deadlineTs);
}
}
@Override
public HTableDescriptor[] deleteTables(String regex) throws IOException {
return deleteTables(Pattern.compile(regex));
}
/**
* Delete tables matching the passed in pattern and wait on completion.
*
* Warning: Use this method carefully, there is no prompting and the effect is
* immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
* {@link #deleteTable(TableName)}
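*
* <p>A sketch of that safer, explicit approach (illustrative only; note that
* {@link #deleteTable(TableName)} expects the table to be disabled first):
* <pre>{@code
* for (HTableDescriptor htd : admin.listTables(pattern)) {
*   if (admin.isTableEnabled(htd.getTableName())) {
*     admin.disableTable(htd.getTableName());
*   }
*   admin.deleteTable(htd.getTableName());
* }
* }</pre>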
*
* @param pattern The pattern to match table names against
* @return Table descriptors for tables that couldn't be deleted
* @throws IOException
*/
@Override
public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
List<HTableDescriptor> failed = new LinkedList<>();
for (HTableDescriptor table : listTables(pattern)) {
try {
deleteTable(table.getTableName());
} catch (IOException ex) {
LOG.info("Failed to delete table " + table.getTableName(), ex);
failed.add(table);
}
}
return failed.toArray(new HTableDescriptor[failed.size()]);
}
@Override
public void truncateTable(final TableName tableName, final boolean preserveSplits)
throws IOException {
get(truncateTableAsync(tableName, preserveSplits), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> truncateTableAsync(final TableName tableName, final boolean preserveSplits)
throws IOException {
TruncateTableResponse response =
executeCallable(new MasterCallable<TruncateTableResponse>(getConnection(),
getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected TruncateTableResponse rpcCall() throws Exception {
setPriority(tableName);
LOG.info("Started truncating " + tableName);
TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
tableName, preserveSplits, nonceGroup, nonce);
return master.truncateTable(getRpcController(), req);
}
});
return new TruncateTableFuture(this, tableName, preserveSplits, response);
}
private static class TruncateTableFuture extends TableFuture<Void> {
private final boolean preserveSplits;
public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName,
final boolean preserveSplits, final TruncateTableResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
this.preserveSplits = preserveSplits;
}
@Override
public String getOperationType() {
return "TRUNCATE";
}
@Override
protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
waitForTableEnabled(deadlineTs);
// once the table is enabled, we know the operation is done. so we can fetch the splitKeys
byte[][] splitKeys = preserveSplits ? getAdmin().getTableSplits(getTableName()) : null;
waitForAllRegionsOnline(deadlineTs, splitKeys);
return null;
}
}
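/**
* Derives the table's current split keys from its region start keys: the first region's empty
* start key is dropped, so a single-region table yields {@code null}.
*/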
private byte[][] getTableSplits(final TableName tableName) throws IOException {
byte[][] splits = null;
try (RegionLocator locator = getConnection().getRegionLocator(tableName)) {
byte[][] startKeys = locator.getStartKeys();
if (startKeys.length == 1) {
return splits;
}
splits = new byte[startKeys.length - 1][];
for (int i = 1; i < startKeys.length; i++) {
splits[i - 1] = startKeys[i];
}
}
return splits;
}
@Override
public void enableTable(final TableName tableName)
throws IOException {
get(enableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> enableTableAsync(final TableName tableName) throws IOException {
TableName.isLegalFullyQualifiedTableName(tableName.getName());
EnableTableResponse response = executeCallable(
new MasterCallable<EnableTableResponse>(getConnection(), getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected EnableTableResponse rpcCall() throws Exception {
setPriority(tableName);
LOG.info("Started enable of " + tableName);
EnableTableRequest req =
RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce);
return master.enableTable(getRpcController(), req);
}
});
return new EnableTableFuture(this, tableName, response);
}
private static class EnableTableFuture extends TableFuture<Void> {
public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
final EnableTableResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
}
@Override
public String getOperationType() {
return "ENABLE";
}
@Override
protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException {
waitForTableEnabled(deadlineTs);
return null;
}
}
@Override
public HTableDescriptor[] enableTables(String regex) throws IOException {
return enableTables(Pattern.compile(regex));
}
@Override
public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
List<HTableDescriptor> failed = new LinkedList<>();
for (HTableDescriptor table : listTables(pattern)) {
if (isTableDisabled(table.getTableName())) {
try {
enableTable(table.getTableName());
} catch (IOException ex) {
LOG.info("Failed to enable table " + table.getTableName(), ex);
failed.add(table);
}
}
}
return failed.toArray(new HTableDescriptor[failed.size()]);
}
@Override
public void disableTable(final TableName tableName)
throws IOException {
get(disableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> disableTableAsync(final TableName tableName) throws IOException {
TableName.isLegalFullyQualifiedTableName(tableName.getName());
DisableTableResponse response = executeCallable(
new MasterCallable<DisableTableResponse>(getConnection(), getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected DisableTableResponse rpcCall() throws Exception {
setPriority(tableName);
LOG.info("Started disable of " + tableName);
DisableTableRequest req =
RequestConverter.buildDisableTableRequest(
tableName, nonceGroup, nonce);
return master.disableTable(getRpcController(), req);
}
});
return new DisableTableFuture(this, tableName, response);
}
private static class DisableTableFuture extends TableFuture<Void> {
public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
final DisableTableResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
}
@Override
public String getOperationType() {
return "DISABLE";
}
@Override
protected Void waitOperationResult(long deadlineTs) throws IOException, TimeoutException {
waitForTableDisabled(deadlineTs);
return null;
}
}
@Override
public HTableDescriptor[] disableTables(String regex) throws IOException {
return disableTables(Pattern.compile(regex));
}
@Override
public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
List<HTableDescriptor> failed = new LinkedList<>();
for (HTableDescriptor table : listTables(pattern)) {
if (isTableEnabled(table.getTableName())) {
try {
disableTable(table.getTableName());
} catch (IOException ex) {
LOG.info("Failed to disable table " + table.getTableName(), ex);
failed.add(table);
}
}
}
return failed.toArray(new HTableDescriptor[failed.size()]);
}
@Override
public boolean isTableEnabled(final TableName tableName) throws IOException {
checkTableExists(tableName);
return executeCallable(new RpcRetryingCallable<Boolean>() {
@Override
protected Boolean rpcCall(int callTimeout) throws Exception {
TableState tableState = MetaTableAccessor.getTableState(getConnection(), tableName);
if (tableState == null) {
throw new TableNotFoundException(tableName);
}
return tableState.inStates(TableState.State.ENABLED);
}
});
}
@Override
public boolean isTableDisabled(TableName tableName) throws IOException {
checkTableExists(tableName);
return connection.isTableDisabled(tableName);
}
@Override
public boolean isTableAvailable(TableName tableName) throws IOException {
return connection.isTableAvailable(tableName, null);
}
@Override
public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
return connection.isTableAvailable(tableName, splitKeys);
}
@Override
public Pair<Integer, Integer> getAlterStatus(final TableName tableName) throws IOException {
return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection(),
getRpcControllerFactory()) {
@Override
protected Pair<Integer, Integer> rpcCall() throws Exception {
setPriority(tableName);
GetSchemaAlterStatusRequest req = RequestConverter
.buildGetSchemaAlterStatusRequest(tableName);
GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req);
Pair<Integer, Integer> pair = new Pair<>(ret.getYetToUpdateRegions(),
ret.getTotalRegions());
return pair;
}
});
}
@Override
public Pair<Integer, Integer> getAlterStatus(final byte[] tableName) throws IOException {
return getAlterStatus(TableName.valueOf(tableName));
}
@Override
public void addColumnFamily(final TableName tableName, final ColumnFamilyDescriptor columnFamily)
throws IOException {
get(addColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> addColumnFamilyAsync(final TableName tableName,
final ColumnFamilyDescriptor columnFamily) throws IOException {
AddColumnResponse response =
executeCallable(new MasterCallable<AddColumnResponse>(getConnection(),
getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected AddColumnResponse rpcCall() throws Exception {
setPriority(tableName);
AddColumnRequest req =
RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce);
return master.addColumn(getRpcController(), req);
}
});
return new AddColumnFamilyFuture(this, tableName, response);
}
private static class AddColumnFamilyFuture extends ModifyTableFuture {
public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
final AddColumnResponse response) {
super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId()
: null);
}
@Override
public String getOperationType() {
return "ADD_COLUMN_FAMILY";
}
}
/**
* {@inheritDoc}
* @deprecated Since 2.0. Will be removed in 3.0. Use
* {@link #deleteColumnFamily(TableName, byte[])} instead.
*/
@Override
@Deprecated
public void deleteColumn(final TableName tableName, final byte[] columnFamily)
throws IOException {
deleteColumnFamily(tableName, columnFamily);
}
@Override
public void deleteColumnFamily(final TableName tableName, final byte[] columnFamily)
throws IOException {
get(deleteColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily)
throws IOException {
DeleteColumnResponse response =
executeCallable(new MasterCallable<DeleteColumnResponse>(getConnection(),
getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected DeleteColumnResponse rpcCall() throws Exception {
setPriority(tableName);
DeleteColumnRequest req =
RequestConverter.buildDeleteColumnRequest(tableName, columnFamily,
nonceGroup, nonce);
return master.deleteColumn(getRpcController(), req);
}
});
return new DeleteColumnFamilyFuture(this, tableName, response);
}
private static class DeleteColumnFamilyFuture extends ModifyTableFuture {
public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
final DeleteColumnResponse response) {
super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId()
: null);
}
@Override
public String getOperationType() {
return "DELETE_COLUMN_FAMILY";
}
}
@Override
public void modifyColumnFamily(final TableName tableName,
final ColumnFamilyDescriptor columnFamily) throws IOException {
get(modifyColumnFamilyAsync(tableName, columnFamily), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
public Future<Void> modifyColumnFamilyAsync(final TableName tableName,
final ColumnFamilyDescriptor columnFamily) throws IOException {
ModifyColumnResponse response =
executeCallable(new MasterCallable<ModifyColumnResponse>(getConnection(),
getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected ModifyColumnResponse rpcCall() throws Exception {
setPriority(tableName);
ModifyColumnRequest req =
RequestConverter.buildModifyColumnRequest(tableName, columnFamily,
nonceGroup, nonce);
return master.modifyColumn(getRpcController(), req);
}
});
return new ModifyColumnFamilyFuture(this, tableName, response);
}
private static class ModifyColumnFamilyFuture extends ModifyTableFuture {
public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName,
final ModifyColumnResponse response) {
super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId()
: null);
}
@Override
public String getOperationType() {
return "MODIFY_COLUMN_FAMILY";
}
}
@Deprecated
@Override
public void closeRegion(final String regionName, final String unused) throws IOException {
unassign(Bytes.toBytes(regionName), true);
}
@Deprecated
@Override
public void closeRegion(final byte [] regionName, final String unused) throws IOException {
unassign(regionName, true);
}
@Deprecated
@Override
public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
final String unused) throws IOException {
unassign(Bytes.toBytes(encodedRegionName), true);
return true;
}
@Deprecated
@Override
public void closeRegion(final ServerName unused, final HRegionInfo hri) throws IOException {
unassign(hri.getRegionName(), true);
}
/**
* @param sn
* @return List of {@link HRegionInfo}.
* @throws IOException
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
* Use {@link #getRegions(ServerName)}.
*/
@Deprecated
@Override
public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
return getRegions(sn).stream().map(ImmutableHRegionInfo::new).collect(Collectors.toList());
}
@Override
public void flush(final TableName tableName) throws IOException {
checkTableExists(tableName);
if (isTableDisabled(tableName)) {
LOG.info("Table is disabled: " + tableName.getNameAsString());
return;
}
execProcedure("flush-table-proc", tableName.getNameAsString(), new HashMap<>());
}
@Override
public void flushRegion(final byte[] regionName) throws IOException {
Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
final RegionInfo regionInfo = regionServerPair.getFirst();
ServerName serverName = regionServerPair.getSecond();
flush(this.connection.getAdmin(serverName), regionInfo);
}
private void flush(AdminService.BlockingInterface admin, final RegionInfo info)
throws IOException {
ProtobufUtil.call(() -> {
// TODO: There is no timeout on this controller. Set one!
HBaseRpcController controller = rpcControllerFactory.newController();
FlushRegionRequest request =
RequestConverter.buildFlushRegionRequest(info.getRegionName());
admin.flushRegion(controller, request);
return null;
});
}
@Override
public void flushRegionServer(ServerName serverName) throws IOException {
for (RegionInfo region : getRegions(serverName)) {
flush(this.connection.getAdmin(serverName), region);
}
}
/**
* {@inheritDoc}
*/
@Override
public void compact(final TableName tableName)
throws IOException {
compact(tableName, null, false, CompactType.NORMAL);
}
@Override
public void compactRegion(final byte[] regionName)
throws IOException {
compactRegion(regionName, null, false);
}
/**
* {@inheritDoc}
*/
@Override
public void compact(final TableName tableName, final byte[] columnFamily)
throws IOException {
compact(tableName, columnFamily, false, CompactType.NORMAL);
}
/**
* {@inheritDoc}
*/
@Override
public void compactRegion(final byte[] regionName, final byte[] columnFamily)
throws IOException {
compactRegion(regionName, columnFamily, false);
}
@Override
public void compactRegionServer(final ServerName serverName) throws IOException {
for (RegionInfo region : getRegions(serverName)) {
compact(this.connection.getAdmin(serverName), region, false, null);
}
}
@Override
public void majorCompactRegionServer(final ServerName serverName) throws IOException {
for (RegionInfo region : getRegions(serverName)) {
compact(this.connection.getAdmin(serverName), region, true, null);
}
}
@Override
public void majorCompact(final TableName tableName)
throws IOException {
compact(tableName, null, true, CompactType.NORMAL);
}
@Override
public void majorCompactRegion(final byte[] regionName)
throws IOException {
compactRegion(regionName, null, true);
}
/**
* {@inheritDoc}
*/
@Override
public void majorCompact(final TableName tableName, final byte[] columnFamily)
throws IOException {
compact(tableName, columnFamily, true, CompactType.NORMAL);
}
@Override
public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
throws IOException {
compactRegion(regionName, columnFamily, true);
}
/**
* Compact a table.
* Asynchronous operation.
*
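* <p>For reference, the public entry points that delegate here are typically used as follows
* (illustrative sketch; {@code "cf"} is a placeholder column family name):
* <pre>{@code
* admin.compact(tableName);                            // minor compaction, all families
* admin.majorCompact(tableName, Bytes.toBytes("cf"));  // major compaction of one family
* }</pre>
*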
* @param tableName table or region to compact
* @param columnFamily column family within a table or region
* @param major True if we are to do a major compaction.
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if a remote or network exception occurs
*/
private void compact(final TableName tableName, final byte[] columnFamily, final boolean major,
CompactType compactType) throws IOException {
switch (compactType) {
case MOB:
compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
major, columnFamily);
break;
case NORMAL:
checkTableExists(tableName);
for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
ServerName sn = loc.getServerName();
if (sn == null) {
continue;
}
try {
compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily);
} catch (NotServingRegionException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() +
": " + StringUtils.stringifyException(e));
}
}
}
break;
default:
throw new IllegalArgumentException("Unknown compactType: " + compactType);
}
}
/**
* Compact an individual region.
* Asynchronous operation.
*
* @param regionName region to compact
* @param columnFamily column family within a table or region
* @param major True if we are to do a major compaction.
* @throws IOException if a remote or network exception occurs
* @throws InterruptedException
*/
private void compactRegion(final byte[] regionName, final byte[] columnFamily,
final boolean major) throws IOException {
Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
compact(this.connection.getAdmin(regionServerPair.getSecond()), regionServerPair.getFirst(),
major, columnFamily);
}
private void compact(AdminService.BlockingInterface admin, RegionInfo hri, boolean major,
byte[] family) throws IOException {
Callable<Void> callable = new Callable<Void>() {
@Override
public Void call() throws Exception {
// TODO: There is no timeout on this controller. Set one!
HBaseRpcController controller = rpcControllerFactory.newController();
CompactRegionRequest request =
RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
admin.compactRegion(controller, request);
return null;
}
};
ProtobufUtil.call(callable);
}
@Override
public void move(final byte[] encodedRegionName, final byte[] destServerName) throws IOException {
executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(encodedRegionName);
MoveRegionRequest request =
RequestConverter.buildMoveRegionRequest(encodedRegionName,
destServerName != null ? ServerName.valueOf(Bytes.toString(destServerName)) : null);
master.moveRegion(getRpcController(), request);
return null;
}
});
}
@Override
public void assign(final byte [] regionName) throws MasterNotRunningException,
ZooKeeperConnectionException, IOException {
executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(regionName);
AssignRegionRequest request =
RequestConverter.buildAssignRegionRequest(getRegionName(regionName));
master.assignRegion(getRpcController(), request);
return null;
}
});
}
@Override
public void unassign(final byte [] regionName, final boolean force) throws IOException {
final byte[] toBeUnassigned = getRegionName(regionName);
executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(regionName);
UnassignRegionRequest request =
RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
master.unassignRegion(getRpcController(), request);
return null;
}
});
}
@Override
public void offline(final byte [] regionName)
throws IOException {
executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(regionName);
master.offlineRegion(getRpcController(),
RequestConverter.buildOfflineRegionRequest(regionName));
return null;
}
});
}
@Override
public boolean balancerSwitch(final boolean on, final boolean synchronous)
throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
SetBalancerRunningRequest req =
RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
return master.setBalancerRunning(getRpcController(), req).getPrevBalanceValue();
}
});
}
@Override
public boolean balance() throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.balance(getRpcController(),
RequestConverter.buildBalanceRequest(false)).getBalancerRan();
}
});
}
@Override
public boolean balance(final boolean force) throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.balance(getRpcController(),
RequestConverter.buildBalanceRequest(force)).getBalancerRan();
}
});
}
@Override
public boolean isBalancerEnabled() throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.isBalancerEnabled(getRpcController(),
RequestConverter.buildIsBalancerEnabledRequest()).getEnabled();
}
});
}
/**
* {@inheritDoc}
*/
@Override
public CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException {
checkTableExists(tableName);
CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder();
List<Pair<RegionInfo, ServerName>> pairs =
MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
Map<ServerName, List<RegionInfo>> regionInfoByServerName =
pairs.stream()
.filter(pair -> !(pair.getFirst().isOffline()))
.filter(pair -> pair.getSecond() != null)
.collect(Collectors.groupingBy(pair -> pair.getSecond(),
Collectors.mapping(pair -> pair.getFirst(), Collectors.toList())));
for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) {
CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue());
cacheEvictionStats = cacheEvictionStats.append(stats);
if (stats.getExceptionCount() > 0) {
for (Map.Entry<byte[], Throwable> exception : stats.getExceptions().entrySet()) {
LOG.debug("Failed to clear block cache for "
+ Bytes.toStringBinary(exception.getKey())
+ " on " + entry.getKey() + ": ", exception.getValue());
}
}
}
return cacheEvictionStats.build();
}
private CacheEvictionStats clearBlockCache(final ServerName sn, final List<RegionInfo> hris)
throws IOException {
HBaseRpcController controller = rpcControllerFactory.newController();
AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
ClearRegionBlockCacheRequest request =
RequestConverter.buildClearRegionBlockCacheRequest(hris);
ClearRegionBlockCacheResponse response;
try {
response = admin.clearRegionBlockCache(controller, request);
return ProtobufUtil.toCacheEvictionStats(response.getStats());
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
}
/**
* Invoke region normalizer. Can NOT run for various reasons. Check logs.
*
* @return True if region normalizer ran, false otherwise.
*/
@Override
public boolean normalize() throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.normalize(getRpcController(),
RequestConverter.buildNormalizeRequest()).getNormalizerRan();
}
});
}
@Override
public boolean isNormalizerEnabled() throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.isNormalizerEnabled(getRpcController(),
RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled();
}
});
}
@Override
public boolean normalizerSwitch(final boolean on) throws IOException {
return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
SetNormalizerRunningRequest req =
RequestConverter.buildSetNormalizerRunningRequest(on);
return master.setNormalizerRunning(getRpcController(), req).getPrevNormalizerValue();
}
});
}
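  // Usage sketch (illustrative, not part of this class): enabling the normalizer and requesting
  // a run. "conn" is an assumed, pre-existing Connection; error handling is omitted.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     boolean previouslyEnabled = admin.normalizerSwitch(true);
  //     boolean ran = admin.normalize();   // may be false; see the master logs for the reason
  //   }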
@Override
public boolean catalogJanitorSwitch(final boolean enable) throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.enableCatalogJanitor(getRpcController(),
RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
}
});
}
@Override
public int runCatalogJanitor() throws IOException {
    return executeCallable(new MasterCallable<Integer>(getConnection(), getRpcControllerFactory()) {
@Override
protected Integer rpcCall() throws Exception {
return master.runCatalogScan(getRpcController(),
RequestConverter.buildCatalogScanRequest()).getScanResult();
}
});
}
@Override
public boolean isCatalogJanitorEnabled() throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override
protected Boolean rpcCall() throws Exception {
return master.isCatalogJanitorEnabled(getRpcController(),
RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
}
});
}
@Override
public boolean cleanerChoreSwitch(final boolean on) throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override public Boolean rpcCall() throws Exception {
return master.setCleanerChoreRunning(getRpcController(),
RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue();
}
});
}
@Override
public boolean runCleanerChore() throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override public Boolean rpcCall() throws Exception {
return master.runCleanerChore(getRpcController(),
RequestConverter.buildRunCleanerChoreRequest()).getCleanerChoreRan();
}
});
}
@Override
public boolean isCleanerChoreEnabled() throws IOException {
    return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
@Override public Boolean rpcCall() throws Exception {
return master.isCleanerChoreEnabled(getRpcController(),
RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue();
}
});
}
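  // Usage sketch (illustrative, not part of this class): driving the catalog janitor and cleaner
  // chore by hand, for example from a maintenance script. "conn" is an assumed, pre-existing
  // Connection; error handling is omitted.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     if (admin.isCatalogJanitorEnabled()) {
  //       int scanResult = admin.runCatalogJanitor();   // the janitor's scan result
  //     }
  //     if (admin.isCleanerChoreEnabled()) {
  //       boolean ran = admin.runCleanerChore();
  //     }
  //   }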
/**
* Merge two regions. Synchronous operation.
   * Note: it is not feasible to predict how long a merge will take, so this method is
   * intended for internal testing only.
   * @param nameOfRegionA encoded or full name of region a
   * @param nameOfRegionB encoded or full name of region b
   * @param forcible true if a compulsory merge should be performed; otherwise only two
   *   adjacent regions will be merged
* @throws IOException
*/
@VisibleForTesting
public void mergeRegionsSync(
final byte[] nameOfRegionA,
final byte[] nameOfRegionB,
final boolean forcible) throws IOException {
get(
mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible),
syncWaitTimeout,
TimeUnit.MILLISECONDS);
}
/**
* Merge two regions. Asynchronous operation.
* @param nameOfRegionA encoded or full name of region a
* @param nameOfRegionB encoded or full name of region b
   * @param forcible true if a compulsory merge should be performed; otherwise only two
   *   adjacent regions will be merged
* @throws IOException
* @deprecated Since 2.0. Will be removed in 3.0. Use
* {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead.
*/
@Deprecated
@Override
public void mergeRegions(final byte[] nameOfRegionA,
final byte[] nameOfRegionB, final boolean forcible)
throws IOException {
mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible);
}
/**
* Merge two regions. Asynchronous operation.
* @param nameOfRegionA encoded or full name of region a
* @param nameOfRegionB encoded or full name of region b
   * @param forcible true if a compulsory merge should be performed; otherwise only two
   *   adjacent regions will be merged
* @throws IOException
*/
@Override
  public Future<Void> mergeRegionsAsync(
final byte[] nameOfRegionA,
final byte[] nameOfRegionB,
final boolean forcible) throws IOException {
byte[][] nameofRegionsToMerge = new byte[2][];
nameofRegionsToMerge[0] = nameOfRegionA;
nameofRegionsToMerge[1] = nameOfRegionB;
return mergeRegionsAsync(nameofRegionsToMerge, forcible);
}
/**
   * Merge two or more regions. Asynchronous operation.
   * @param nameofRegionsToMerge encoded or full names of the regions to merge
   * @param forcible true if a compulsory merge should be performed; otherwise only
   *   adjacent regions will be merged
* @throws IOException
*/
@Override
  public Future<Void> mergeRegionsAsync(
final byte[][] nameofRegionsToMerge,
final boolean forcible) throws IOException {
assert(nameofRegionsToMerge.length >= 2);
byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
for(int i = 0; i < nameofRegionsToMerge.length; i++) {
encodedNameofRegionsToMerge[i] = HRegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ?
nameofRegionsToMerge[i] :
Bytes.toBytes(HRegionInfo.encodeRegionName(nameofRegionsToMerge[i]));
}
TableName tableName = null;
    Pair<RegionInfo, ServerName> pair;
for(int i = 0; i < nameofRegionsToMerge.length; i++) {
pair = getRegion(nameofRegionsToMerge[i]);
if (pair != null) {
if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly");
}
if (tableName == null) {
tableName = pair.getFirst().getTable();
} else if (!tableName.equals(pair.getFirst().getTable())) {
throw new IllegalArgumentException ("Cannot merge regions from two different tables " +
tableName + " and " + pair.getFirst().getTable());
}
} else {
throw new UnknownRegionException (
"Can't invoke merge on unknown region "
+ Bytes.toStringBinary(encodedNameofRegionsToMerge[i]));
}
}
MergeTableRegionsResponse response =
      executeCallable(new MasterCallable<MergeTableRegionsResponse>(getConnection(),
getRpcControllerFactory()) {
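        // Client-generated nonce group and nonce: they let the master recognize a retried
        // submission of this same merge request and avoid running the procedure twice.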
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected MergeTableRegionsResponse rpcCall() throws Exception {
MergeTableRegionsRequest request = RequestConverter
.buildMergeTableRegionsRequest(
encodedNameofRegionsToMerge,
forcible,
nonceGroup,
nonce);
return master.mergeTableRegions(getRpcController(), request);
}
});
return new MergeTableRegionsFuture(this, tableName, response);
}
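  // Usage sketch (illustrative, not part of this class): merging two adjacent regions and
  // waiting for the procedure to finish. "conn" is an assumed, pre-existing Connection, the
  // encoded region names are hypothetical placeholders, and checked exceptions are not handled.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     byte[][] regions = new byte[][] {
  //       Bytes.toBytes("1588230740abcdef"), Bytes.toBytes("fedcba0478032851") };
  //     Future<Void> f = admin.mergeRegionsAsync(regions, false);
  //     f.get(60, TimeUnit.SECONDS);
  //   }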
  private static class MergeTableRegionsFuture extends TableFuture<Void> {
public MergeTableRegionsFuture(
final HBaseAdmin admin,
final TableName tableName,
final MergeTableRegionsResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
}
public MergeTableRegionsFuture(
final HBaseAdmin admin,
final TableName tableName,
final Long procId) {
super(admin, tableName, procId);
}
@Override
public String getOperationType() {
return "MERGE_REGIONS";
}
}
/**
* Split one region. Synchronous operation.
   * Note: it is not feasible to predict how long a split will take, so this method is
   * intended for internal testing only.
   * @param regionName encoded or full name of the region
   * @param splitPoint key at which to split the region
* @throws IOException
*/
@VisibleForTesting
public void splitRegionSync(byte[] regionName, byte[] splitPoint) throws IOException {
splitRegionSync(regionName, splitPoint, syncWaitTimeout, TimeUnit.MILLISECONDS);
}
/**
* Split one region. Synchronous operation.
* @param regionName region to be split
* @param splitPoint split point
   * @param timeout how long to wait for the split to complete
   * @param units time unit for {@code timeout}
* @throws IOException
*/
public void splitRegionSync(byte[] regionName, byte[] splitPoint,
final long timeout, final TimeUnit units) throws IOException {
get(
splitRegionAsync(regionName, splitPoint),
timeout,
units);
}
@Override
  public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint)
throws IOException {
byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ?
regionName : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName));
    Pair<RegionInfo, ServerName> pair = getRegion(regionName);
if (pair != null) {
if (pair.getFirst() != null &&
pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException ("Can't invoke split on non-default regions directly");
}
} else {
      throw new UnknownRegionException(
        "Can't invoke split on unknown region "
          + Bytes.toStringBinary(encodedNameofRegionToSplit));
}
return splitRegionAsync(pair.getFirst(), splitPoint);
}
  Future<Void> splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException {
TableName tableName = hri.getTable();
if (hri.getStartKey() != null && splitPoint != null &&
Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
throw new IOException("should not give a splitkey which equals to startkey!");
}
SplitTableRegionResponse response = executeCallable(
      new MasterCallable<SplitTableRegionResponse>(getConnection(), getRpcControllerFactory()) {
Long nonceGroup = ng.getNonceGroup();
Long nonce = ng.newNonce();
@Override
protected SplitTableRegionResponse rpcCall() throws Exception {
setPriority(tableName);
SplitTableRegionRequest request = RequestConverter
.buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce);
return master.splitRegion(getRpcController(), request);
}
});
return new SplitTableRegionFuture(this, tableName, response);
}
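  // Usage sketch (illustrative, not part of this class): splitting a region at an explicit
  // split key and waiting for the split procedure. "conn" is an assumed, pre-existing
  // Connection; the region name and split key are hypothetical placeholders; checked exceptions
  // are not handled.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     byte[] regionName = Bytes.toBytes("myTable,,1600000000000.abcdef1234567890.");
  //     Future<Void> f = admin.splitRegionAsync(regionName, Bytes.toBytes("row-5000"));
  //     f.get(60, TimeUnit.SECONDS);
  //   }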
  private static class SplitTableRegionFuture extends TableFuture<Void> {
public SplitTableRegionFuture(final HBaseAdmin admin,
final TableName tableName,
final SplitTableRegionResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
}
public SplitTableRegionFuture(
final HBaseAdmin admin,
final TableName tableName,
final Long procId) {
super(admin, tableName, procId);
}
@Override
public String getOperationType() {
return "SPLIT_REGION";
}
}
@Override
public void split(final TableName tableName) throws IOException {
split(tableName, null);
}
@Override
public void splitRegion(final byte[] regionName) throws IOException {
splitRegion(regionName, null);
}
@Override
public void split(final TableName tableName, final byte[] splitPoint) throws IOException {
checkTableExists(tableName);
for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) {
ServerName sn = loc.getServerName();
if (sn == null) {
continue;
}
RegionInfo r = loc.getRegion();
// check for parents
if (r.isSplitParent()) {
continue;
}
// if a split point given, only split that particular region
if (r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID ||
(splitPoint != null && !r.containsRow(splitPoint))) {
continue;
}
// call out to master to do split now
splitRegionAsync(r, splitPoint);
}
}
@Override
public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException {
    Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionName);
if (regionServerPair == null) {
throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
}
if (regionServerPair.getFirst() != null &&
regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException("Can't split replicas directly. "
+ "Replicas are auto-split when their primary is split.");
}
if (regionServerPair.getSecond() == null) {
throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
}
splitRegionAsync(regionServerPair.getFirst(), splitPoint);
}
@Override
public void modifyTable(final TableName tableName, final TableDescriptor td)
throws IOException {
get(modifyTableAsync(tableName, td), syncWaitTimeout, TimeUnit.MILLISECONDS);
}
@Override
  public Future<Void> modifyTableAsync(final TableName tableName, final TableDescriptor td)
throws IOException {
if (!tableName.equals(td.getTableName())) {
throw new IllegalArgumentException("the specified table name '" + tableName +
"' doesn't match with the HTD one: " + td.getTableName());
}
return modifyTableAsync(td);
}
  private static class ModifyTableFuture extends TableFuture<Void> {
public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName,
final ModifyTableResponse response) {
super(admin, tableName,
(response != null && response.hasProcId()) ? response.getProcId() : null);
}
public ModifyTableFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) {
super(admin, tableName, procId);
}
@Override
public String getOperationType() {
return "MODIFY";
}
@Override
protected Void postOperationResult(final Void result, final long deadlineTs)
throws IOException, TimeoutException {
// The modify operation on the table is asynchronous on the server side irrespective
// of whether Procedure V2 is supported or not. So, we wait in the client till
// all regions get updated.
waitForSchemaUpdate(deadlineTs);
return result;
}
}
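  // Usage sketch (illustrative, not part of this class): altering a table descriptor and
  // waiting until all regions have picked up the new schema. "conn" is an assumed, pre-existing
  // Connection and "t1" is a hypothetical table name; error handling is omitted.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     TableName tn = TableName.valueOf("t1");
  //     TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tn))
  //         .setMaxFileSize(10L * 1024 * 1024 * 1024)
  //         .build();
  //     admin.modifyTable(tn, td);   // blocks until regions are updated (see ModifyTableFuture)
  //   }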
/**
* @param regionName Name of a region.
   * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
   *   a verified region name (we call
   *   {@link MetaTableAccessor#getRegionLocation(Connection, byte[])}), else null.
   * Throws IllegalArgumentException if <code>regionName</code> is null.
* @throws IOException
*/
  Pair<RegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
if (regionName == null) {
throw new IllegalArgumentException("Pass a table name or region name");
}
    Pair<RegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionName);
if (pair == null) {
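      // Direct lookup by full region name failed; treat the input as an encoded region name and
      // fall back to a full scan of hbase:meta, matching rows by encoded name.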
      final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null);
final String encodedName = Bytes.toString(regionName);
MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
@Override
public boolean visit(Result data) throws IOException {
RegionInfo info = MetaTableAccessor.getRegionInfo(data);
if (info == null) {
LOG.warn("No serialized HRegionInfo in " + data);
return true;
}
RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
boolean matched = false;
ServerName sn = null;
if (rl != null) {
for (HRegionLocation h : rl.getRegionLocations()) {
if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
sn = h.getServerName();
info = h.getRegionInfo();
matched = true;
}
}
}
if (!matched) return true;
result.set(new Pair<>(info, sn));
return false; // found the region, stop
}
};
MetaTableAccessor.fullScanRegions(connection, visitor);
pair = result.get();
}
return pair;
}
/**
   * If the input is a region name, it is returned as is. If it is an
   * encoded region name, the corresponding region is looked up in meta
   * and its region name is returned. If no region in meta matches the
   * input as either a region name or an encoded region name, the input
   * is returned as is; no unknown-region exception is thrown.
*/
private byte[] getRegionName(
final byte[] regionNameOrEncodedRegionName) throws IOException {
if (Bytes.equals(regionNameOrEncodedRegionName,
HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
|| Bytes.equals(regionNameOrEncodedRegionName,
HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
}
byte[] tmp = regionNameOrEncodedRegionName;
    Pair<RegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
if (regionServerPair != null && regionServerPair.getFirst() != null) {
tmp = regionServerPair.getFirst().getRegionName();
}
return tmp;
}
/**
* Check if table exists or not
* @param tableName Name of a table.
* @return tableName instance
* @throws IOException if a remote or network exception occurs.
* @throws TableNotFoundException if table does not exist.
*/
private TableName checkTableExists(final TableName tableName)
throws IOException {
    return executeCallable(new RpcRetryingCallable<TableName>() {
@Override
protected TableName rpcCall(int callTimeout) throws Exception {
if (!MetaTableAccessor.tableExists(connection, tableName)) {
throw new TableNotFoundException(tableName);
}
return tableName;
}
});
}
@Override
public synchronized void shutdown() throws IOException {
    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(HConstants.HIGH_QOS);
master.shutdown(getRpcController(), ShutdownRequest.newBuilder().build());
return null;
}
});
}
@Override
public synchronized void stopMaster() throws IOException {
    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
@Override
protected Void rpcCall() throws Exception {
setPriority(HConstants.HIGH_QOS);
master.stopMaster(getRpcController(), StopMasterRequest.newBuilder().build());
return null;
}
});
}
@Override
public synchronized void stopRegionServer(final String hostnamePort)
throws IOException {
String hostname = Addressing.parseHostname(hostnamePort);
int port = Addressing.parsePort(hostnamePort);
final AdminService.BlockingInterface admin =
this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
// TODO: There is no timeout on this controller. Set one!
HBaseRpcController controller = rpcControllerFactory.newController();
controller.setPriority(HConstants.HIGH_QOS);
StopServerRequest request = RequestConverter.buildStopServerRequest(
"Called by admin client " + this.connection.toString());
try {
admin.stopServer(controller, request);
} catch (Exception e) {
throw ProtobufUtil.handleRemoteException(e);
}
}
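  // Usage sketch (illustrative, not part of this class): stopping a single region server from an
  // operator tool. "conn" is an assumed, pre-existing Connection and the host:port value is a
  // hypothetical placeholder.
  //
  //   try (Admin admin = conn.getAdmin()) {
  //     admin.stopRegionServer("rs1.example.com:16020");
  //   }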
@Override
public boolean isMasterInMaintenanceMode() throws IOException {
    return executeCallable(new MasterCallable<IsInMaintenanceModeResponse>(getConnection(),
this.rpcControllerFactory) {
@Override
protected IsInMaintenanceModeResponse rpcCall() throws Exception {
return master.isMasterInMaintenanceMode(getRpcController(),
IsInMaintenanceModeRequest.newBuilder().build());
}
}).getInMaintenanceMode();
}
@Override
public ClusterMetrics getClusterMetrics(EnumSet