/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.HConstants.HIGH_QOS;
import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
import static org.apache.hadoop.hbase.util.FutureUtils.unwrapCompletionException;
import com.google.protobuf.Message;
import com.google.protobuf.RpcChannel;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ClusterMetricsBuilder;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
import org.apache.hadoop.hbase.client.Scan.ReadType;
import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
import org.apache.hbase.thirdparty.io.netty.util.Timeout;
import org.apache.hbase.thirdparty.io.netty.util.TimerTask;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.FlushMasterStoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespacesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableStoreFileTrackerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchExceedThrottleQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SwitchRpcThrottleResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
/**
 * The implementation of AsyncAdmin.
 * <p>
 * The word 'Raw' means that this is a low-level class. The returned {@link CompletableFuture}
 * will be completed inside the rpc framework thread, which means that the callbacks registered
 * to the {@link CompletableFuture} will also be executed inside the rpc framework thread. So
 * users of this class should not do time-consuming work in the callbacks.
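 * <p>
 * For example, rather than doing heavy work in a callback, hand the result off to an
 * application executor (a hedged sketch; {@code appExecutor} is a caller-supplied executor,
 * not part of this class):
 *
 * <pre>{@code
 * admin.listTableNames(false).whenCompleteAsync((tables, error) -> {
 *   // do the time-consuming work here, off the rpc framework thread
 * }, appExecutor);
 * }</pre>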
* @since 2.0.0
* @see AsyncHBaseAdmin
* @see AsyncConnection#getAdmin()
* @see AsyncConnection#getAdminBuilder()
*/
@InterfaceAudience.Private
class RawAsyncHBaseAdmin implements AsyncAdmin {
public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
private static final Logger LOG = LoggerFactory.getLogger(AsyncHBaseAdmin.class);
private final AsyncConnectionImpl connection;
private final HashedWheelTimer retryTimer;
  private final AsyncTable<AdvancedScanResultConsumer> metaTable;
private final long rpcTimeoutNs;
private final long operationTimeoutNs;
private final long pauseNs;
private final long pauseNsForServerOverloaded;
private final int maxAttempts;
private final int startLogErrorsCnt;
private final NonceGenerator ng;
RawAsyncHBaseAdmin(AsyncConnectionImpl connection, HashedWheelTimer retryTimer,
AsyncAdminBuilderBase builder) {
this.connection = connection;
this.retryTimer = retryTimer;
this.metaTable = connection.getTable(META_TABLE_NAME);
this.rpcTimeoutNs = builder.rpcTimeoutNs;
this.operationTimeoutNs = builder.operationTimeoutNs;
this.pauseNs = builder.pauseNs;
if (builder.pauseNsForServerOverloaded < builder.pauseNs) {
LOG.warn(
"Configured value of pauseNsForServerOverloaded is {} ms, which is less than"
+ " the normal pause value {} ms, use the greater one instead",
TimeUnit.NANOSECONDS.toMillis(builder.pauseNsForServerOverloaded),
TimeUnit.NANOSECONDS.toMillis(builder.pauseNs));
this.pauseNsForServerOverloaded = builder.pauseNs;
} else {
this.pauseNsForServerOverloaded = builder.pauseNsForServerOverloaded;
}
this.maxAttempts = builder.maxAttempts;
this.startLogErrorsCnt = builder.startLogErrorsCnt;
this.ng = connection.getNonceGenerator();
}
  private <T> MasterRequestCallerBuilder<T> newMasterCaller() {
    return this.connection.callerFactory.<T> masterRequest()
.rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
.operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
.pause(pauseNs, TimeUnit.NANOSECONDS)
.pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS)
.maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt);
}
  private <T> AdminRequestCallerBuilder<T> newAdminCaller() {
    return this.connection.callerFactory.<T> adminRequest()
.rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
.operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
.pause(pauseNs, TimeUnit.NANOSECONDS)
.pauseForServerOverloaded(pauseNsForServerOverloaded, TimeUnit.NANOSECONDS)
.maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt);
}
  @FunctionalInterface
  private interface MasterRpcCall<RESP, REQ> {
    void call(MasterService.Interface stub, HBaseRpcController controller, REQ req,
      RpcCallback<RESP> done);
  }
  @FunctionalInterface
  private interface AdminRpcCall<RESP, REQ> {
    void call(AdminService.Interface stub, HBaseRpcController controller, REQ req,
      RpcCallback<RESP> done);
  }
  @FunctionalInterface
  private interface Converter<D, S> {
    D convert(S src) throws IOException;
  }
  private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
    MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall,
    Converter<RESP, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() {
@Override
public void run(PRESP resp) {
if (controller.failed()) {
future.completeExceptionally(controller.getFailed());
} else {
try {
future.complete(respConverter.convert(resp));
} catch (IOException e) {
future.completeExceptionally(e);
}
}
}
});
return future;
}
  private <PREQ, PRESP, RESP> CompletableFuture<RESP> adminCall(HBaseRpcController controller,
    AdminService.Interface stub, PREQ preq, AdminRpcCall<PRESP, PREQ> rpcCall,
    Converter<RESP, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() {
@Override
public void run(PRESP resp) {
if (controller.failed()) {
future.completeExceptionally(controller.getFailed());
} else {
try {
future.complete(respConverter.convert(resp));
} catch (IOException e) {
future.completeExceptionally(e);
}
}
}
});
return future;
}
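  // The procedureCall overloads below wrap master RPCs that return a procedure id: the request
  // is submitted through a retrying master caller, the returned proc id is handed to
  // waitProcedureResult (which asks the master for the procedure's outcome), and the supplied
  // ProcedureBiConsumer is notified once the resulting future finishes.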
  private <PREQ, PRESP> CompletableFuture<Void> procedureCall(PREQ preq,
    MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter,
    ProcedureBiConsumer consumer) {
return procedureCall(b -> {
}, preq, rpcCall, respConverter, consumer);
}
  private <PREQ, PRESP> CompletableFuture<Void> procedureCall(TableName tableName, PREQ preq,
    MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter,
    ProcedureBiConsumer consumer) {
return procedureCall(b -> b.priority(tableName), preq, rpcCall, respConverter, consumer);
}
  private <PREQ, PRESP> CompletableFuture<Void> procedureCall(
    Consumer<MasterRequestCallerBuilder<?>> prioritySetter, PREQ preq,
    MasterRpcCall<PRESP, PREQ> rpcCall, Converter<Long, PRESP> respConverter,
    ProcedureBiConsumer consumer) {
    MasterRequestCallerBuilder<Long> builder = this.<Long> newMasterCaller().action(
      (controller, stub) -> this.<PREQ, PRESP, Long> call(controller, stub, preq, rpcCall,
        respConverter));
prioritySetter.accept(builder);
    CompletableFuture<Long> procFuture = builder.call();
    CompletableFuture<Void> future = waitProcedureResult(procFuture);
addListener(future, consumer);
return future;
}
@Override
  public CompletableFuture<Boolean> tableExists(TableName tableName) {
if (TableName.isMetaTableName(tableName)) {
return CompletableFuture.completedFuture(true);
}
return AsyncMetaTableAccessor.tableExists(metaTable, tableName);
}
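  // Usage sketch for the method above (join() blocks, so call it from a non-rpc thread):
  //   boolean exists = admin.tableExists(TableName.valueOf("test")).join();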
@Override
  public CompletableFuture<List<TableDescriptor>> listTableDescriptors(boolean includeSysTables) {
return getTableDescriptors(
RequestConverter.buildGetTableDescriptorsRequest(null, includeSysTables));
}
/**
* {@link #listTableDescriptors(boolean)}
*/
@Override
  public CompletableFuture<List<TableDescriptor>> listTableDescriptors(Pattern pattern,
boolean includeSysTables) {
    Preconditions.checkNotNull(pattern,
      "pattern is null. If you don't specify a pattern, use listTableDescriptors(boolean) instead");
return getTableDescriptors(
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables));
}
@Override
  public CompletableFuture<List<TableDescriptor>> listTableDescriptors(List<TableName> tableNames) {
    Preconditions.checkNotNull(tableNames, "tableNames is null. If you don't specify tableNames,"
      + " use listTableDescriptors(boolean) instead");
if (tableNames.isEmpty()) {
return CompletableFuture.completedFuture(Collections.emptyList());
}
return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(tableNames));
}
  private CompletableFuture<List<TableDescriptor>>
    getTableDescriptors(GetTableDescriptorsRequest request) {
    return this.<List<TableDescriptor>> newMasterCaller()
      .action((controller, stub) -> this.<GetTableDescriptorsRequest, GetTableDescriptorsResponse,
        List<TableDescriptor>> call(controller, stub, request,
(s, c, req, done) -> s.getTableDescriptors(c, req, done),
(resp) -> ProtobufUtil.toTableDescriptorList(resp)))
.call();
}
@Override
  public CompletableFuture<List<TableName>> listTableNames(boolean includeSysTables) {
return getTableNames(RequestConverter.buildGetTableNamesRequest(null, includeSysTables));
}
@Override
  public CompletableFuture<List<TableName>> listTableNames(Pattern pattern,
boolean includeSysTables) {
Preconditions.checkNotNull(pattern,
"pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead");
return getTableNames(RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables));
}
  private CompletableFuture<List<TableName>> getTableNames(GetTableNamesRequest request) {
    return this.<List<TableName>> newMasterCaller()
      .action((controller, stub) -> this.<GetTableNamesRequest, GetTableNamesResponse,
        List<TableName>> call(controller, stub, request,
(s, c, req, done) -> s.getTableNames(c, req, done),
(resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList())))
.call();
}
@Override
  public CompletableFuture<List<TableDescriptor>> listTableDescriptorsByNamespace(String name) {
    return this.<List<TableDescriptor>> newMasterCaller()
      .action((controller, stub) -> this.<ListTableDescriptorsByNamespaceRequest,
        ListTableDescriptorsByNamespaceResponse, List<TableDescriptor>> call(controller, stub,
ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build(),
(s, c, req, done) -> s.listTableDescriptorsByNamespace(c, req, done),
(resp) -> ProtobufUtil.toTableDescriptorList(resp)))
.call();
}
@Override
  public CompletableFuture<List<TableName>> listTableNamesByNamespace(String name) {
    return this.<List<TableName>> newMasterCaller()
      .action((controller, stub) -> this.<ListTableNamesByNamespaceRequest,
        ListTableNamesByNamespaceResponse, List<TableName>> call(controller, stub,
ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build(),
(s, c, req, done) -> s.listTableNamesByNamespace(c, req, done),
(resp) -> ProtobufUtil.toTableNameList(resp.getTableNameList())))
.call();
}
@Override
  public CompletableFuture<TableDescriptor> getDescriptor(TableName tableName) {
    CompletableFuture<TableDescriptor> future = new CompletableFuture<>();
    addListener(this.<List<TableSchema>> newMasterCaller().priority(tableName)
      .action((controller, stub) -> this.<GetTableDescriptorsRequest, GetTableDescriptorsResponse,
        List<TableSchema>> call(controller, stub,
RequestConverter.buildGetTableDescriptorsRequest(tableName),
(s, c, req, done) -> s.getTableDescriptors(c, req, done),
(resp) -> resp.getTableSchemaList()))
.call(), (tableSchemas, error) -> {
if (error != null) {
future.completeExceptionally(error);
return;
}
if (!tableSchemas.isEmpty()) {
future.complete(ProtobufUtil.toTableDescriptor(tableSchemas.get(0)));
} else {
future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
}
});
return future;
}
@Override
  public CompletableFuture<Void> createTable(TableDescriptor desc) {
return createTable(desc.getTableName(),
RequestConverter.buildCreateTableRequest(desc, null, ng.getNonceGroup(), ng.newNonce()));
}
@Override
  public CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
int numRegions) {
try {
return createTable(desc, getSplitKeys(startKey, endKey, numRegions));
} catch (IllegalArgumentException e) {
return failedFuture(e);
}
}
@Override
  public CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys) {
Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys,"
+ " use createTable(TableDescriptor) instead");
try {
verifySplitKeys(splitKeys);
return createTable(desc.getTableName(), RequestConverter.buildCreateTableRequest(desc,
splitKeys, ng.getNonceGroup(), ng.newNonce()));
} catch (IllegalArgumentException e) {
return failedFuture(e);
}
}
  private CompletableFuture<Void> createTable(TableName tableName, CreateTableRequest request) {
    Preconditions.checkNotNull(tableName, "table name is null");
    return this.<CreateTableRequest, CreateTableResponse> procedureCall(tableName, request,
(s, c, req, done) -> s.createTable(c, req, done), (resp) -> resp.getProcId(),
new CreateTableProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> modifyTable(TableDescriptor desc) {
    return this.<ModifyTableRequest, ModifyTableResponse> procedureCall(desc.getTableName(),
RequestConverter.buildModifyTableRequest(desc.getTableName(), desc, ng.getNonceGroup(),
ng.newNonce()),
(s, c, req, done) -> s.modifyTable(c, req, done), (resp) -> resp.getProcId(),
new ModifyTableProcedureBiConsumer(this, desc.getTableName()));
}
@Override
  public CompletableFuture<Void> modifyTableStoreFileTracker(TableName tableName, String dstSFT) {
    return this.<ModifyTableStoreFileTrackerRequest,
      ModifyTableStoreFileTrackerResponse> procedureCall(tableName,
RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT,
ng.getNonceGroup(), ng.newNonce()),
(s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done),
(resp) -> resp.getProcId(),
new ModifyTableStoreFileTrackerProcedureBiConsumer(this, tableName));
}
@Override
  public CompletableFuture<Void> deleteTable(TableName tableName) {
    return this.<DeleteTableRequest, DeleteTableResponse> procedureCall(tableName,
RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()),
(s, c, req, done) -> s.deleteTable(c, req, done), (resp) -> resp.getProcId(),
new DeleteTableProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits) {
    return this.<TruncateTableRequest, TruncateTableResponse> procedureCall(tableName,
RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(),
ng.newNonce()),
(s, c, req, done) -> s.truncateTable(c, req, done), (resp) -> resp.getProcId(),
new TruncateTableProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> enableTable(TableName tableName) {
    return this.<EnableTableRequest, EnableTableResponse> procedureCall(tableName,
RequestConverter.buildEnableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()),
(s, c, req, done) -> s.enableTable(c, req, done), (resp) -> resp.getProcId(),
new EnableTableProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> disableTable(TableName tableName) {
    return this.<DisableTableRequest, DisableTableResponse> procedureCall(tableName,
RequestConverter.buildDisableTableRequest(tableName, ng.getNonceGroup(), ng.newNonce()),
(s, c, req, done) -> s.disableTable(c, req, done), (resp) -> resp.getProcId(),
new DisableTableProcedureBiConsumer(tableName));
}
  /**
   * Utility for completing the passed TableState {@link CompletableFuture} <code>future</code>
   * using the passed parameters. Sets the error, or a boolean result ('true' if the table state
   * matches the passed-in targetState).
   */
  private static CompletableFuture<Boolean> completeCheckTableState(
    CompletableFuture<Boolean> future, TableState tableState, Throwable error,
    TableState.State targetState, TableName tableName) {
if (error != null) {
future.completeExceptionally(error);
} else {
if (tableState != null) {
future.complete(tableState.inStates(targetState));
} else {
future.completeExceptionally(new TableNotFoundException(tableName));
}
}
return future;
}
@Override
  public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
if (TableName.isMetaTableName(tableName)) {
return CompletableFuture.completedFuture(true);
}
    CompletableFuture<Boolean> future = new CompletableFuture<>();
addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error,
TableState.State.ENABLED, tableName);
});
return future;
}
@Override
  public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
if (TableName.isMetaTableName(tableName)) {
return CompletableFuture.completedFuture(false);
}
    CompletableFuture<Boolean> future = new CompletableFuture<>();
addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error,
TableState.State.DISABLED, tableName);
});
return future;
}
@Override
  public CompletableFuture<Boolean> isTableAvailable(TableName tableName) {
return isTableAvailable(tableName, Optional.empty());
}
@Override
  public CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys) {
Preconditions.checkNotNull(splitKeys, "splitKeys is null. If you don't specify splitKeys,"
+ " use isTableAvailable(TableName) instead");
return isTableAvailable(tableName, Optional.of(splitKeys));
}
  private CompletableFuture<Boolean> isTableAvailable(TableName tableName,
    Optional<byte[][]> splitKeys) {
if (TableName.isMetaTableName(tableName)) {
return connection.registry.getMetaRegionLocations().thenApply(locs -> Stream
.of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null));
}
    CompletableFuture<Boolean> future = new CompletableFuture<>();
addListener(isTableEnabled(tableName), (enabled, error) -> {
if (error != null) {
if (error instanceof TableNotFoundException) {
future.complete(false);
} else {
future.completeExceptionally(error);
}
return;
}
if (!enabled) {
future.complete(false);
} else {
addListener(AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName),
(locations, error1) -> {
if (error1 != null) {
future.completeExceptionally(error1);
return;
}
            List<HRegionLocation> notDeployedRegions = locations.stream()
.filter(loc -> loc.getServerName() == null).collect(Collectors.toList());
if (notDeployedRegions.size() > 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Table " + tableName + " has " + notDeployedRegions.size() + " regions");
}
future.complete(false);
return;
}
            Optional<Boolean> available =
splitKeys.map(keys -> compareRegionsWithSplitKeys(locations, keys));
future.complete(available.orElse(true));
});
}
});
return future;
}
  private boolean compareRegionsWithSplitKeys(List<HRegionLocation> locations,
    byte[][] splitKeys) {
int regionCount = 0;
for (HRegionLocation location : locations) {
RegionInfo info = location.getRegion();
if (Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
regionCount++;
continue;
}
for (byte[] splitKey : splitKeys) {
// Just check if the splitkey is available
if (Bytes.equals(info.getStartKey(), splitKey)) {
regionCount++;
break;
}
}
}
return regionCount == splitKeys.length + 1;
}
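  // Example: splitKeys {b, c} should yield splitKeys.length + 1 = 3 regions: [start, b), [b, c)
  // and [c, end). The table is considered fully deployed once one region starts at the empty
  // start key and one region starts at each split key.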
@Override
  public CompletableFuture<Void> addColumnFamily(TableName tableName,
    ColumnFamilyDescriptor columnFamily) {
    return this.<AddColumnRequest, AddColumnResponse> procedureCall(tableName,
RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(),
ng.newNonce()),
(s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(),
new AddColumnFamilyProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> deleteColumnFamily(TableName tableName, byte[] columnFamily) {
    return this.<DeleteColumnRequest, DeleteColumnResponse> procedureCall(tableName,
RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(),
ng.newNonce()),
(s, c, req, done) -> s.deleteColumn(c, req, done), (resp) -> resp.getProcId(),
new DeleteColumnFamilyProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> modifyColumnFamily(TableName tableName,
    ColumnFamilyDescriptor columnFamily) {
    return this.<ModifyColumnRequest, ModifyColumnResponse> procedureCall(tableName,
RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(),
ng.newNonce()),
(s, c, req, done) -> s.modifyColumn(c, req, done), (resp) -> resp.getProcId(),
new ModifyColumnFamilyProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> modifyColumnFamilyStoreFileTracker(TableName tableName,
    byte[] family, String dstSFT) {
    return this.<ModifyColumnStoreFileTrackerRequest,
      ModifyColumnStoreFileTrackerResponse> procedureCall(tableName,
RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT,
ng.getNonceGroup(), ng.newNonce()),
(s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done),
(resp) -> resp.getProcId(),
new ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(tableName));
}
@Override
  public CompletableFuture<Void> createNamespace(NamespaceDescriptor descriptor) {
    return this.<CreateNamespaceRequest, CreateNamespaceResponse> procedureCall(
RequestConverter.buildCreateNamespaceRequest(descriptor),
(s, c, req, done) -> s.createNamespace(c, req, done), (resp) -> resp.getProcId(),
new CreateNamespaceProcedureBiConsumer(descriptor.getName()));
}
@Override
  public CompletableFuture<Void> modifyNamespace(NamespaceDescriptor descriptor) {
    return this.<ModifyNamespaceRequest, ModifyNamespaceResponse> procedureCall(
RequestConverter.buildModifyNamespaceRequest(descriptor),
(s, c, req, done) -> s.modifyNamespace(c, req, done), (resp) -> resp.getProcId(),
new ModifyNamespaceProcedureBiConsumer(descriptor.getName()));
}
@Override
  public CompletableFuture<Void> deleteNamespace(String name) {
    return this.<DeleteNamespaceRequest, DeleteNamespaceResponse> procedureCall(
RequestConverter.buildDeleteNamespaceRequest(name),
(s, c, req, done) -> s.deleteNamespace(c, req, done), (resp) -> resp.getProcId(),
new DeleteNamespaceProcedureBiConsumer(name));
}
@Override
  public CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(String name) {
    return this.<NamespaceDescriptor> newMasterCaller()
      .action((controller, stub) -> this.<GetNamespaceDescriptorRequest,
        GetNamespaceDescriptorResponse, NamespaceDescriptor> call(controller, stub,
RequestConverter.buildGetNamespaceDescriptorRequest(name),
(s, c, req, done) -> s.getNamespaceDescriptor(c, req, done),
(resp) -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor())))
.call();
}
@Override
  public CompletableFuture<List<String>> listNamespaces() {
    return this.<List<String>> newMasterCaller()
      .action((controller, stub) -> this.<ListNamespacesRequest, ListNamespacesResponse,
        List<String>> call(controller, stub, ListNamespacesRequest.newBuilder().build(),
(s, c, req, done) -> s.listNamespaces(c, req, done),
(resp) -> resp.getNamespaceNameList()))
.call();
}
@Override
  public CompletableFuture<List<NamespaceDescriptor>> listNamespaceDescriptors() {
    return this.<List<NamespaceDescriptor>> newMasterCaller()
      .action((controller, stub) -> this.<ListNamespaceDescriptorsRequest,
        ListNamespaceDescriptorsResponse, List<NamespaceDescriptor>> call(controller, stub,
ListNamespaceDescriptorsRequest.newBuilder().build(),
(s, c, req, done) -> s.listNamespaceDescriptors(c, req, done),
(resp) -> ProtobufUtil.toNamespaceDescriptorList(resp)))
.call();
}
@Override
  public CompletableFuture<List<RegionInfo>> getRegions(ServerName serverName) {
    return this.<List<RegionInfo>> newAdminCaller()
      .action((controller, stub) -> this.<GetOnlineRegionRequest, GetOnlineRegionResponse,
        List<RegionInfo>> adminCall(controller, stub,
RequestConverter.buildGetOnlineRegionRequest(),
(s, c, req, done) -> s.getOnlineRegion(c, req, done),
resp -> ProtobufUtil.getRegionInfos(resp)))
.serverName(serverName).call();
}
@Override
  public CompletableFuture<List<RegionInfo>> getRegions(TableName tableName) {
if (tableName.equals(META_TABLE_NAME)) {
return connection.registry.getMetaRegionLocations()
.thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
.collect(Collectors.toList()));
} else {
return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).thenApply(
locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList()));
}
}
@Override
  public CompletableFuture<Void> flush(TableName tableName) {
return flush(tableName, null);
}
@Override
  public CompletableFuture<Void> flush(TableName tableName, byte[] columnFamily) {
    CompletableFuture<Void> future = new CompletableFuture<>();
addListener(tableExists(tableName), (exists, err) -> {
if (err != null) {
future.completeExceptionally(err);
} else if (!exists) {
future.completeExceptionally(new TableNotFoundException(tableName));
} else {
addListener(isTableEnabled(tableName), (tableEnabled, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else if (!tableEnabled) {
future.completeExceptionally(new TableNotEnabledException(tableName));
} else {
            Map<String, String> props = new HashMap<>();
if (columnFamily != null) {
props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily));
}
addListener(
execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), props),
(ret, err3) -> {
if (err3 != null) {
future.completeExceptionally(err3);
} else {
future.complete(ret);
}
});
}
});
}
});
return future;
}
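  // Note: the table-level flush above is implemented with the master's "flush-table-proc"
  // procedure (FLUSH_TABLE_PROCEDURE_SIGNATURE) via execProcedure, passing the optional column
  // family as a procedure property, rather than by sending one flush RPC per region.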
@Override
  public CompletableFuture<Void> flushRegion(byte[] regionName) {
return flushRegion(regionName, null);
}
@Override
  public CompletableFuture<Void> flushRegion(byte[] regionName, byte[] columnFamily) {
    CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionLocation(regionName), (location, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
ServerName serverName = location.getServerName();
if (serverName == null) {
future
.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
return;
}
addListener(flush(serverName, location.getRegion(), columnFamily), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
  private CompletableFuture<Void> flush(final ServerName serverName, final RegionInfo regionInfo,
    byte[] columnFamily) {
    return this.<Void> newAdminCaller().serverName(serverName)
      .action((controller, stub) -> this.<FlushRegionRequest, FlushRegionResponse,
        Void> adminCall(controller, stub, RequestConverter
.buildFlushRegionRequest(regionInfo.getRegionName(), columnFamily, false),
(s, c, req, done) -> s.flushRegion(c, req, done), resp -> null))
.call();
}
@Override
  public CompletableFuture<Void> flushRegionServer(ServerName sn) {
    CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegions(sn), (hRegionInfos, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
      List<CompletableFuture<Void>> compactFutures = new ArrayList<>();
if (hRegionInfos != null) {
hRegionInfos.forEach(region -> compactFutures.add(flush(sn, region, null)));
}
addListener(CompletableFuture.allOf(
        compactFutures.toArray(new CompletableFuture<?>[compactFutures.size()])), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
@Override
  public CompletableFuture<Void> compact(TableName tableName, CompactType compactType) {
return compact(tableName, null, false, compactType);
}
@Override
  public CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily,
CompactType compactType) {
Preconditions.checkNotNull(columnFamily, "columnFamily is null. "
+ "If you don't specify a columnFamily, use compact(TableName) instead");
return compact(tableName, columnFamily, false, compactType);
}
@Override
  public CompletableFuture<Void> compactRegion(byte[] regionName) {
return compactRegion(regionName, null, false);
}
@Override
  public CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily) {
Preconditions.checkNotNull(columnFamily, "columnFamily is null."
+ " If you don't specify a columnFamily, use compactRegion(regionName) instead");
return compactRegion(regionName, columnFamily, false);
}
@Override
  public CompletableFuture<Void> majorCompact(TableName tableName, CompactType compactType) {
return compact(tableName, null, true, compactType);
}
@Override
  public CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily,
    CompactType compactType) {
    Preconditions.checkNotNull(columnFamily, "columnFamily is null. If you don't specify a"
      + " columnFamily, use majorCompact(TableName, CompactType) instead");
return compact(tableName, columnFamily, true, compactType);
}
@Override
  public CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
return compactRegion(regionName, null, true);
}
@Override
  public CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily) {
Preconditions.checkNotNull(columnFamily, "columnFamily is null."
+ " If you don't specify a columnFamily, use majorCompactRegion(regionName) instead");
return compactRegion(regionName, columnFamily, true);
}
@Override
  public CompletableFuture<Void> compactRegionServer(ServerName sn) {
return compactRegionServer(sn, false);
}
@Override
  public CompletableFuture<Void> majorCompactRegionServer(ServerName sn) {
return compactRegionServer(sn, true);
}
  private CompletableFuture<Void> compactRegionServer(ServerName sn, boolean major) {
    CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegions(sn), (hRegionInfos, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
      List<CompletableFuture<Void>> compactFutures = new ArrayList<>();
if (hRegionInfos != null) {
hRegionInfos.forEach(region -> compactFutures.add(compact(sn, region, major, null)));
}
addListener(CompletableFuture.allOf(
        compactFutures.toArray(new CompletableFuture<?>[compactFutures.size()])), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
  private CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily,
    boolean major) {
    CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionLocation(regionName), (location, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
ServerName serverName = location.getServerName();
if (serverName == null) {
future
.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
return;
}
addListener(compact(location.getServerName(), location.getRegion(), major, columnFamily),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
  /**
   * List all region locations for the specified table.
   */
  private CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(TableName tableName) {
    if (TableName.META_TABLE_NAME.equals(tableName)) {
      CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> {
if (err != null) {
future.completeExceptionally(err);
} else if (
metaRegions == null || metaRegions.isEmpty()
|| metaRegions.getDefaultRegionLocation() == null
) {
future.completeExceptionally(new IOException("meta region does not found"));
} else {
future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation()));
}
});
return future;
} else {
// For non-meta table, we fetch all locations by scanning hbase:meta table
return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName);
}
}
  /**
   * Compact a column family of a table. Compaction runs asynchronously on the servers; it may
   * still be in progress even after CompletableFuture.get() returns.
   */
  private CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily, boolean major,
    CompactType compactType) {
    CompletableFuture<Void> future = new CompletableFuture<>();
switch (compactType) {
case MOB:
addListener(connection.registry.getActiveMaster(), (serverName, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName);
addListener(compact(serverName, regionInfo, major, columnFamily), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
break;
case NORMAL:
addListener(getTableHRegionLocations(tableName), (locations, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
          if (locations == null || locations.isEmpty()) {
            future.completeExceptionally(new TableNotFoundException(tableName));
            return;
          }
          CompletableFuture<?>[] compactFutures =
            locations.stream().filter(l -> l.getRegion() != null)
              .filter(l -> !l.getRegion().isOffline()).filter(l -> l.getServerName() != null)
              .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily))
              .toArray(CompletableFuture<?>[]::new);
          // the outer future completes only after all of the per-region compact futures complete.
addListener(CompletableFuture.allOf(compactFutures), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
break;
default:
throw new IllegalArgumentException("Unknown compactType: " + compactType);
}
return future;
}
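  // In the switch above, MOB compaction is special-cased: the request for the table's synthetic
  // mob region is sent to the active master, while NORMAL compaction fans out one request per
  // deployed, non-offline region.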
  /**
   * Compact the given region on the given region server.
   */
  private CompletableFuture<Void> compact(final ServerName sn, final RegionInfo hri,
    final boolean major, byte[] columnFamily) {
    return this.<Void> newAdminCaller().serverName(sn)
      .action((controller, stub) -> this.<CompactRegionRequest, CompactRegionResponse,
        Void> adminCall(controller, stub,
RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, columnFamily),
(s, c, req, done) -> s.compactRegion(c, req, done), resp -> null))
.call();
}
private byte[] toEncodeRegionName(byte[] regionName) {
return RegionInfo.isEncodedRegionName(regionName)
? regionName
: Bytes.toBytes(RegionInfo.encodeRegionName(regionName));
}
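  // Example (illustrative values only): a full region name such as
  // "t1,,1600000000000.0123456789abcdef0123456789abcdef." is reduced to its encoded suffix
  // "0123456789abcdef0123456789abcdef", while an already-encoded name passes through unchanged.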
  private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference<TableName> tableName,
    CompletableFuture<TableName> result) {
addListener(getRegionLocation(encodeRegionName), (location, err) -> {
if (err != null) {
result.completeExceptionally(err);
return;
}
RegionInfo regionInfo = location.getRegion();
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
result.completeExceptionally(
new IllegalArgumentException("Can't invoke merge on non-default regions directly"));
return;
}
if (!tableName.compareAndSet(null, regionInfo.getTable())) {
if (!tableName.get().equals(regionInfo.getTable())) {
          // the tables of these two regions should be the same.
result.completeExceptionally(
new IllegalArgumentException("Cannot merge regions from two different tables "
+ tableName.get() + " and " + regionInfo.getTable()));
} else {
result.complete(tableName.get());
}
}
});
}
  private CompletableFuture<TableName> checkRegionsAndGetTableName(byte[][] encodedRegionNames) {
    AtomicReference<TableName> tableNameRef = new AtomicReference<>();
    CompletableFuture<TableName> future = new CompletableFuture<>();
for (byte[] encodedRegionName : encodedRegionNames) {
checkAndGetTableName(encodedRegionName, tableNameRef, future);
}
return future;
}
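  // All regions above race to complete the same future: the first callback only records the
  // table name, and each later callback either completes the future (same table) or fails it
  // (different tables), so the future completes only after at least two regions were checked.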
@Override
  public CompletableFuture<Boolean> mergeSwitch(boolean enabled, boolean drainMerges) {
return setSplitOrMergeOn(enabled, drainMerges, MasterSwitchType.MERGE);
}
@Override
  public CompletableFuture<Boolean> isMergeEnabled() {
return isSplitOrMergeOn(MasterSwitchType.MERGE);
}
@Override
  public CompletableFuture<Boolean> splitSwitch(boolean enabled, boolean drainSplits) {
return setSplitOrMergeOn(enabled, drainSplits, MasterSwitchType.SPLIT);
}
@Override
  public CompletableFuture<Boolean> isSplitEnabled() {
return isSplitOrMergeOn(MasterSwitchType.SPLIT);
}
  private CompletableFuture<Boolean> setSplitOrMergeOn(boolean enabled, boolean synchronous,
MasterSwitchType switchType) {
SetSplitOrMergeEnabledRequest request =
RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType);
    return this.<Boolean> newMasterCaller()
      .action((controller, stub) -> this.<SetSplitOrMergeEnabledRequest,
        SetSplitOrMergeEnabledResponse, Boolean> call(controller, stub, request,
(s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done),
(resp) -> resp.getPrevValueList().get(0)))
.call();
}
  private CompletableFuture<Boolean> isSplitOrMergeOn(MasterSwitchType switchType) {
IsSplitOrMergeEnabledRequest request =
RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType);
    return this.<Boolean> newMasterCaller()
      .action((controller, stub) -> this.<IsSplitOrMergeEnabledRequest,
        IsSplitOrMergeEnabledResponse, Boolean> call(controller, stub, request,
(s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), (resp) -> resp.getEnabled()))
.call();
}
@Override
  public CompletableFuture<Void> mergeRegions(List<byte[]> nameOfRegionsToMerge,
    boolean forcible) {
    if (nameOfRegionsToMerge.size() < 2) {
      return failedFuture(new IllegalArgumentException(
        "Cannot merge only " + nameOfRegionsToMerge.size() + " region; need at least two"));
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
byte[][] encodedNameOfRegionsToMerge =
nameOfRegionsToMerge.stream().map(this::toEncodeRegionName).toArray(byte[][]::new);
addListener(checkRegionsAndGetTableName(encodedNameOfRegionsToMerge), (tableName, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
final MergeTableRegionsRequest request;
try {
request = RequestConverter.buildMergeTableRegionsRequest(encodedNameOfRegionsToMerge,
forcible, ng.getNonceGroup(), ng.newNonce());
} catch (DeserializationException e) {
future.completeExceptionally(e);
return;
}
addListener(
this.procedureCall(tableName, request, MasterService.Interface::mergeTableRegions,
MergeTableRegionsResponse::getProcId, new MergeTableRegionProcedureBiConsumer(tableName)),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
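/*
* split(TableName) below fans out: it scans hbase:meta for the table's regions and submits a
* split for every region that is a default replica, is not already a split parent, and has a
* live location. Passing a null split point lets the server pick one, and the returned future
* completes only once every submitted split procedure has finished.
*/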
@Override
public CompletableFuture<Void> split(TableName tableName) {
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(tableExists(tableName), (exist, error) -> {
if (error != null) {
future.completeExceptionally(error);
return;
}
if (!exist) {
future.completeExceptionally(new TableNotFoundException(tableName));
return;
}
addListener(
metaTable
.scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)
.withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION))
.withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))),
(results, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
return;
}
if (results != null && !results.isEmpty()) {
List<CompletableFuture<Void>> splitFutures = new ArrayList<>();
for (Result r : results) {
if (r.isEmpty() || MetaTableAccessor.getRegionInfo(r) == null) {
continue;
}
RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
if (rl != null) {
for (HRegionLocation h : rl.getRegionLocations()) {
if (h != null && h.getServerName() != null) {
RegionInfo hri = h.getRegion();
if (
hri == null || hri.isSplitParent()
|| hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID
) {
continue;
}
splitFutures.add(split(hri, null));
}
}
}
}
addListener(
CompletableFuture
.allOf(splitFutures.toArray(new CompletableFuture<?>[splitFutures.size()])),
(ret, exception) -> {
if (exception != null) {
future.completeExceptionally(exception);
return;
}
future.complete(ret);
});
} else {
future.complete(null);
}
});
});
return future;
}
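/**
* Illustrative usage sketch (table and row key are made up): split the region containing the
* given row, at exactly that row.
*
* <pre>
* admin.split(TableName.valueOf("t1"), Bytes.toBytes("row-5000")).get();
* </pre>
*/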
@Override
public CompletableFuture<Void> split(TableName tableName, byte[] splitPoint) {
CompletableFuture<Void> result = new CompletableFuture<>();
if (splitPoint == null) {
return failedFuture(new IllegalArgumentException("splitPoint must not be null."));
}
addListener(connection.getRegionLocator(tableName).getRegionLocation(splitPoint, true),
(loc, err) -> {
if (err != null) {
result.completeExceptionally(err);
} else if (loc == null || loc.getRegion() == null) {
result.completeExceptionally(new IllegalArgumentException(
"No region found for rowKey=" + Bytes.toStringBinary(splitPoint)));
} else {
addListener(splitRegion(loc.getRegion().getRegionName(), splitPoint), (ret, err2) -> {
if (err2 != null) {
result.completeExceptionally(err2);
} else {
result.complete(ret);
}
});
}
});
return result;
}
@Override
public CompletableFuture<Void> splitRegion(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionLocation(regionName), (location, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
RegionInfo regionInfo = location.getRegion();
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. "
+ "Replicas are auto-split when their primary is split."));
return;
}
ServerName serverName = location.getServerName();
if (serverName == null) {
future
.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
return;
}
addListener(split(regionInfo, null), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
@Override
public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
Preconditions.checkNotNull(splitPoint,
"splitPoint is null. If you don't specify a splitPoint, use splitRegion(byte[]) instead");
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionLocation(regionName), (location, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
RegionInfo regionInfo = location.getRegion();
if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. "
+ "Replicas are auto-split when their primary is split."));
return;
}
ServerName serverName = location.getServerName();
if (serverName == null) {
future
.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
return;
}
if (
regionInfo.getStartKey() != null
&& Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0
) {
future.completeExceptionally(
new IllegalArgumentException("the split point must not equal the region's start key"));
return;
}
addListener(split(regionInfo, splitPoint), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
private CompletableFuture<Void> split(final RegionInfo hri, byte[] splitPoint) {
CompletableFuture<Void> future = new CompletableFuture<>();
TableName tableName = hri.getTable();
final SplitTableRegionRequest request;
try {
request = RequestConverter.buildSplitTableRegionRequest(hri, splitPoint, ng.getNonceGroup(),
ng.newNonce());
} catch (DeserializationException e) {
future.completeExceptionally(e);
return future;
}
addListener(
this.procedureCall(tableName, request, MasterService.Interface::splitRegion,
SplitTableRegionResponse::getProcId, new SplitTableRegionProcedureBiConsumer(tableName)),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
return future;
}
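/**
* Region assignment primitives follow. Illustrative usage sketch (assumes {@code admin} plus
* a region name and destination server obtained elsewhere, e.g. from getRegions and
* getRegionServers):
*
* <pre>
* admin.unassign(regionName).get(); // close the region; the master will reassign it
* admin.move(regionName, destServerName).get(); // or move it to a chosen server
* </pre>
*/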
@Override
public CompletableFuture<Void> assign(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionInfo(regionName), (regionInfo, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
addListener(
this.<Void> newMasterCaller().priority(regionInfo.getTable())
.action((controller, stub) -> this.<AssignRegionRequest, AssignRegionResponse, Void> call(
controller, stub, RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()),
(s, c, req, done) -> s.assignRegion(c, req, done), resp -> null))
.call(),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
@Override
public CompletableFuture<Void> unassign(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionInfo(regionName), (regionInfo, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
addListener(
this.<Void> newMasterCaller().priority(regionInfo.getTable())
.action((controller, stub) -> this.<UnassignRegionRequest, UnassignRegionResponse, Void> call(
controller, stub,
RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()),
(s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))
.call(),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
@Override
public CompletableFuture<Void> offline(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionInfo(regionName), (regionInfo, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
addListener(this.<Void> newMasterCaller().priority(regionInfo.getTable())
.action((controller, stub) -> this.<OfflineRegionRequest, OfflineRegionResponse, Void> call(
controller, stub, RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()),
(s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))
.call(), (ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
@Override
public CompletableFuture<Void> move(byte[] regionName) {
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionInfo(regionName), (regionInfo, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
addListener(
moveRegion(regionInfo,
RequestConverter.buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), null)),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
@Override
public CompletableFuture<Void> move(byte[] regionName, ServerName destServerName) {
Preconditions.checkNotNull(destServerName,
"destServerName is null. If you don't specify a destServerName, use move(byte[]) instead");
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getRegionInfo(regionName), (regionInfo, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
addListener(
moveRegion(regionInfo, RequestConverter
.buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName)),
(ret, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else {
future.complete(ret);
}
});
});
return future;
}
private CompletableFuture<Void> moveRegion(RegionInfo regionInfo, MoveRegionRequest request) {
return this.<Void> newMasterCaller().priority(regionInfo.getTable())
.action(
(controller, stub) -> this.<MoveRegionRequest, MoveRegionResponse, Void> call(controller,
stub, request, (s, c, req, done) -> s.moveRegion(c, req, done), resp -> null))
.call();
}
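/**
* Illustrative usage sketch for the quota methods below (user name and limit are made up;
* QuotaSettingsFactory and ThrottleType come from org.apache.hadoop.hbase.quotas):
*
* <pre>
* admin.setQuota(QuotaSettingsFactory.throttleUser("bob", ThrottleType.REQUEST_NUMBER, 100,
*   TimeUnit.SECONDS)).get(); // at most 100 requests per second for user "bob"
* List&lt;QuotaSettings&gt; quotas = admin.getQuota(new QuotaFilter().setUserFilter("bob")).get();
* </pre>
*/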
@Override
public CompletableFuture<Void> setQuota(QuotaSettings quota) {
return this.<Void> newMasterCaller()
.action((controller, stub) -> this.<SetQuotaRequest, SetQuotaResponse, Void> call(controller,
stub, QuotaSettings.buildSetQuotaRequestProto(quota),
(s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null))
.call();
}
@Override
public CompletableFuture<List<QuotaSettings>> getQuota(QuotaFilter filter) {
CompletableFuture<List<QuotaSettings>> future = new CompletableFuture<>();
Scan scan = QuotaTableUtil.makeScan(filter);
this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build().scan(scan,
new AdvancedScanResultConsumer() {
List<QuotaSettings> settings = new ArrayList<>();
@Override
public void onNext(Result[] results, ScanController controller) {
for (Result result : results) {
try {
QuotaTableUtil.parseResultToCollection(result, settings);
} catch (IOException e) {
controller.terminate();
future.completeExceptionally(e);
}
}
}
@Override
public void onError(Throwable error) {
future.completeExceptionally(error);
}
@Override
public void onComplete() {
future.complete(settings);
}
});
return future;
}
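/**
* Illustrative usage sketch for the replication-peer methods below (peer id and cluster key
* are made up):
*
* <pre>
* ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
*   .setClusterKey("zk1.example.com:2181:/hbase").build();
* admin.addReplicationPeer("peer_1", peerConfig, true).get(); // enabled from the start
* </pre>
*/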
@Override
public CompletableFuture<Void> addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig,
boolean enabled) {
return this.<AddReplicationPeerRequest, AddReplicationPeerResponse> procedureCall(
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled),
(s, c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
new ReplicationProcedureBiConsumer(peerId, () -> "ADD_REPLICATION_PEER"));
}
@Override
public CompletableFuture<Void> removeReplicationPeer(String peerId) {
return this.<RemoveReplicationPeerRequest, RemoveReplicationPeerResponse> procedureCall(
RequestConverter.buildRemoveReplicationPeerRequest(peerId),
(s, c, req, done) -> s.removeReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
new ReplicationProcedureBiConsumer(peerId, () -> "REMOVE_REPLICATION_PEER"));
}
@Override
public CompletableFuture<Void> enableReplicationPeer(String peerId) {
return this.<EnableReplicationPeerRequest, EnableReplicationPeerResponse> procedureCall(
RequestConverter.buildEnableReplicationPeerRequest(peerId),
(s, c, req, done) -> s.enableReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
new ReplicationProcedureBiConsumer(peerId, () -> "ENABLE_REPLICATION_PEER"));
}
@Override
public CompletableFuture<Void> disableReplicationPeer(String peerId) {
return this.<DisableReplicationPeerRequest, DisableReplicationPeerResponse> procedureCall(
RequestConverter.buildDisableReplicationPeerRequest(peerId),
(s, c, req, done) -> s.disableReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
new ReplicationProcedureBiConsumer(peerId, () -> "DISABLE_REPLICATION_PEER"));
}
@Override
public CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(String peerId) {
return this.<ReplicationPeerConfig> newMasterCaller()
.action((controller, stub) -> this.<GetReplicationPeerConfigRequest,
GetReplicationPeerConfigResponse, ReplicationPeerConfig> call(controller, stub,
RequestConverter.buildGetReplicationPeerConfigRequest(peerId),
(s, c, req, done) -> s.getReplicationPeerConfig(c, req, done),
(resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig())))
.call();
}
@Override
public CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
ReplicationPeerConfig peerConfig) {
return this.<UpdateReplicationPeerConfigRequest, UpdateReplicationPeerConfigResponse> procedureCall(
RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig),
(s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
(resp) -> resp.getProcId(),
new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG"));
}
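/*
* append/removeReplicationPeerTableCFs below are read-modify-write: they fetch the current
* peer config, rewrite its table-CFs mapping, and push the result back through
* updateReplicationPeerConfig. The two steps are not atomic, so concurrent updates to the
* same peer can overwrite each other.
*/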
@Override
public CompletableFuture<Void> appendReplicationPeerTableCFs(String id,
Map<TableName, List<String>> tableCfs) {
if (tableCfs == null) {
return failedFuture(new ReplicationException("tableCfs is null"));
}
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getReplicationPeerConfig(id), (peerConfig, error) -> {
if (!completeExceptionally(future, error)) {
ReplicationPeerConfig newPeerConfig =
ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig);
addListener(updateReplicationPeerConfig(id, newPeerConfig), (result, err) -> {
if (!completeExceptionally(future, err)) {
future.complete(result);
}
});
}
});
return future;
}
@Override
public CompletableFuture<Void> removeReplicationPeerTableCFs(String id,
Map<TableName, List<String>> tableCfs) {
if (tableCfs == null) {
return failedFuture(new ReplicationException("tableCfs is null"));
}
CompletableFuture<Void> future = new CompletableFuture<>();
addListener(getReplicationPeerConfig(id), (peerConfig, error) -> {
if (!completeExceptionally(future, error)) {
ReplicationPeerConfig newPeerConfig = null;
try {
newPeerConfig = ReplicationPeerConfigUtil
.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
} catch (ReplicationException e) {
future.completeExceptionally(e);
return;
}
addListener(updateReplicationPeerConfig(id, newPeerConfig), (result, err) -> {
if (!completeExceptionally(future, err)) {
future.complete(result);
}
});
}
});
return future;
}
@Override
public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers() {
return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(null));
}
@Override
public CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Pattern pattern) {
Preconditions.checkNotNull(pattern,
"pattern is null. If you don't specify a pattern, use listReplicationPeers() instead");
return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(pattern));
}
private CompletableFuture<List<ReplicationPeerDescription>>
listReplicationPeers(ListReplicationPeersRequest request) {
return this.<List<ReplicationPeerDescription>> newMasterCaller()
.action((controller, stub) -> this.<ListReplicationPeersRequest, ListReplicationPeersResponse,
List<ReplicationPeerDescription>> call(controller, stub, request,
(s, c, req, done) -> s.listReplicationPeers(c, req, done),
(resp) -> resp.getPeerDescList().stream()
.map(ReplicationPeerConfigUtil::toReplicationPeerDescription)
.collect(Collectors.toList())))
.call();
}
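/*
* listReplicatedTableCFs is computed client-side: a column family counts as replicated when
* its replication scope is anything other than REPLICATION_SCOPE_LOCAL, so the method simply
* filters the column families of every table descriptor.
*/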
@Override
public CompletableFuture<List<TableCFs>> listReplicatedTableCFs() {
CompletableFuture<List<TableCFs>> future = new CompletableFuture<>();
addListener(listTableDescriptors(), (tables, error) -> {
if (!completeExceptionally(future, error)) {
List<TableCFs> replicatedTableCFs = new ArrayList<>();
tables.forEach(table -> {
Map<String, Integer> cfs = new HashMap<>();
Stream.of(table.getColumnFamilies())
.filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL)
.forEach(column -> {
cfs.put(column.getNameAsString(), column.getScope());
});
if (!cfs.isEmpty()) {
replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs));
}
});
future.complete(replicatedTableCFs);
}
});
return future;
}
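/*
* snapshot() is submit-then-poll: the master returns an expected timeout for the snapshot,
* and the client then polls isSnapshotFinished() on the shared retry timer until the snapshot
* reports done or the deadline passes. The pause between polls grows with the usual retry
* backoff but is capped at expectedTimeout / maxAttempts, so every attempt fits inside the
* expected window.
*/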
@Override
public CompletableFuture<Void> snapshot(SnapshotDescription snapshotDesc) {
SnapshotProtos.SnapshotDescription snapshot =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
try {
ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
} catch (IllegalArgumentException e) {
return failedFuture(e);
}
CompletableFuture<Void> future = new CompletableFuture<>();
final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot).build();
addListener(this.<Long> newMasterCaller()
.action((controller, stub) -> this.<SnapshotRequest, SnapshotResponse, Long> call(controller,
stub, request, (s, c, req, done) -> s.snapshot(c, req, done),
resp -> resp.getExpectedTimeout()))
.call(), (expectedTimeout, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
TimerTask pollingTask = new TimerTask() {
int tries = 0;
long startTime = EnvironmentEdgeManager.currentTime();
long endTime = startTime + expectedTimeout;
long maxPauseTime = expectedTimeout / maxAttempts;
@Override
public void run(Timeout timeout) throws Exception {
if (EnvironmentEdgeManager.currentTime() < endTime) {
addListener(isSnapshotFinished(snapshotDesc), (done, err2) -> {
if (err2 != null) {
future.completeExceptionally(err2);
} else if (done) {
future.complete(null);
} else {
// retry again after pauseTime.
long pauseTime =
ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries);
pauseTime = Math.min(pauseTime, maxPauseTime);
AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime,
TimeUnit.MILLISECONDS);
}
});
} else {
future.completeExceptionally(
new SnapshotCreationException("Snapshot '" + snapshot.getName()
+ "' wasn't completed in expectedTime:" + expectedTimeout + " ms", snapshotDesc));
}
}
};
AsyncConnectionImpl.RETRY_TIMER.newTimeout(pollingTask, 1, TimeUnit.MILLISECONDS);
});
return future;
}
@Override
public CompletableFuture<Boolean> isSnapshotFinished(SnapshotDescription snapshot) {
return this.<Boolean> newMasterCaller()
.action((controller, stub) -> this.<IsSnapshotDoneRequest, IsSnapshotDoneResponse,
Boolean> call(controller, stub,
IsSnapshotDoneRequest.newBuilder()
.setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(),
(s, c, req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone()))
.call();
}
@Override
public CompletableFuture