/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import com.google.protobuf.RpcChannel;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.yetus.audience.InterfaceAudience;
/**
* The asynchronous administrative API for HBase.
* @since 2.0.0
*/
@InterfaceAudience.Public
public interface AsyncAdmin {
/**
* @param tableName Table to check.
* @return True if table exists already. The return value will be wrapped by a
* {@link CompletableFuture}.
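* <p>
* A minimal usage sketch (assuming an {@code AsyncAdmin} obtained from an existing
* {@code AsyncConnection} named {@code connection}; the table name is illustrative):
* <pre>{@code
* AsyncAdmin admin = connection.getAdmin();
* admin.tableExists(TableName.valueOf("my_table"))
*     .thenAccept(exists -> System.out.println("exists: " + exists));
* }</pre>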
*/
CompletableFuture<Boolean> tableExists(TableName tableName);
/**
* List all the userspace tables.
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
*/
default CompletableFuture<List<TableDescriptor>> listTableDescriptors() {
return listTableDescriptors(false);
}
/**
* List all the tables.
* @param includeSysTables False to match only against userspace tables
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<TableDescriptor>> listTableDescriptors(boolean includeSysTables);
/**
* List all the tables matching the given pattern.
* @param pattern The compiled regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<TableDescriptor>> listTableDescriptors(Pattern pattern,
boolean includeSysTables);
/**
* Get list of table descriptors by namespace.
* @param name namespace name
* @return returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<TableDescriptor>> listTableDescriptorsByNamespace(String name);
/**
* List all of the names of userspace tables.
* @return a list of table names wrapped by a {@link CompletableFuture}.
* @see #listTableNames(Pattern, boolean)
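* <p>
* For example, to print every userspace table name (a sketch, assuming an existing
* {@code AsyncAdmin} named {@code admin}; error handling omitted):
* <pre>{@code
* admin.listTableNames()
*     .thenAccept(names -> names.forEach(name -> System.out.println(name.getNameAsString())));
* }</pre>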
*/
default CompletableFuture<List<TableName>> listTableNames() {
return listTableNames(false);
}
/**
* List all of the names of tables.
* @param includeSysTables False to match only against userspace tables
* @return a list of table names wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<TableName>> listTableNames(boolean includeSysTables);
/**
* List all of the names of userspace tables.
* @param pattern The regular expression to match against
* @param includeSysTables False to match only against userspace tables
* @return a list of table names wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<TableName>> listTableNames(Pattern pattern, boolean includeSysTables);
/**
* Get list of table names by namespace.
* @param name namespace name
* @return The list of table names in the namespace wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<TableName>> listTableNamesByNamespace(String name);
/**
* Method for getting the tableDescriptor
* @param tableName as a {@link TableName}
* @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<TableDescriptor> getDescriptor(TableName tableName);
/**
* Creates a new table.
* @param desc table descriptor for table
*/
CompletableFuture<Void> createTable(TableDescriptor desc);
/**
* Creates a new table with the specified number of regions. The start key specified will become
* the end key of the first region of the table, and the end key specified will become the start
* key of the last region of the table (the first region has a null start key and the last region
* has a null end key). BigInteger math will be used to divide the key range specified into enough
* segments to make the required number of total regions.
* @param desc table descriptor for table
* @param startKey beginning of key range
* @param endKey end of key range
* @param numRegions the total number of regions to create
*/
CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
int numRegions);
/**
* Creates a new table with an initial set of empty regions defined by the specified split keys.
* The total number of regions created will be the number of split keys plus one.
* Note: avoid passing an empty split key.
* @param desc table descriptor for table
* @param splitKeys array of split keys for the initial regions of the table
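* <p>
* A sketch of pre-splitting a new table (assuming an existing {@code AsyncAdmin} named
* {@code admin}; the table, family, and split key values are illustrative, and builder
* method names may vary slightly across HBase versions):
* <pre>{@code
* TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("my_table"))
*     .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
*     .build();
* byte[][] splitKeys = new byte[][] { Bytes.toBytes("g"), Bytes.toBytes("p") };
* admin.createTable(desc, splitKeys).join();
* }</pre>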
*/
CompletableFuture<Void> createTable(TableDescriptor desc, byte[][] splitKeys);
/**
* Modify an existing table, more IRB friendly version.
* @param desc modified description of the table
*/
CompletableFuture<Void> modifyTable(TableDescriptor desc);
/**
* Deletes a table.
* @param tableName name of table to delete
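* <p>
* A table must be disabled before it can be deleted; a minimal sketch of the chained calls
* (assuming an existing {@code AsyncAdmin} named {@code admin}):
* <pre>{@code
* TableName tn = TableName.valueOf("my_table");
* admin.disableTable(tn)
*     .thenCompose(ignored -> admin.deleteTable(tn))
*     .join();
* }</pre>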
*/
CompletableFuture<Void> deleteTable(TableName tableName);
/**
* Truncate a table.
* @param tableName name of table to truncate
* @param preserveSplits True if the splits should be preserved
*/
CompletableFuture<Void> truncateTable(TableName tableName, boolean preserveSplits);
/**
* Enable a table. The table has to be in disabled state for it to be enabled.
* @param tableName name of the table
*/
CompletableFuture<Void> enableTable(TableName tableName);
/**
* Disable a table. The table has to be in enabled state for it to be disabled.
* @param tableName name of the table
*/
CompletableFuture<Void> disableTable(TableName tableName);
/**
* @param tableName name of table to check
* @return true if table is on-line. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> isTableEnabled(TableName tableName);
/**
* @param tableName name of table to check
* @return true if table is off-line. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> isTableDisabled(TableName tableName);
/**
* @param tableName name of table to check
* @return true if all regions of the table are available. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<Boolean> isTableAvailable(TableName tableName);
/**
* Use this API to check if the table has been created with the specified number of split keys
* that were used while creating the given table. Note: if this API is used after a table's
* regions get split, it may return false. The return value will be wrapped by a
* {@link CompletableFuture}.
* @param tableName name of table to check
* @param splitKeys keys to check if the table has been created with all split keys
*/
CompletableFuture<Boolean> isTableAvailable(TableName tableName, byte[][] splitKeys);
/**
* Add a column family to an existing table.
* @param tableName name of the table to add column family to
* @param columnFamily column family descriptor of column family to be added
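* <p>
* For example (a sketch, assuming an existing {@code AsyncAdmin} named {@code admin}; the
* table and family names are illustrative):
* <pre>{@code
* admin.addColumnFamily(TableName.valueOf("my_table"),
*     ColumnFamilyDescriptorBuilder.of("new_cf")).join();
* }</pre>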
*/
CompletableFuture<Void> addColumnFamily(TableName tableName,
ColumnFamilyDescriptor columnFamily);
/**
* Delete a column family from a table.
* @param tableName name of table
* @param columnFamily name of column family to be deleted
*/
CompletableFuture<Void> deleteColumnFamily(TableName tableName, byte[] columnFamily);
/**
* Modify an existing column family on a table.
* @param tableName name of table
* @param columnFamily new column family descriptor to use
*/
CompletableFuture<Void> modifyColumnFamily(TableName tableName,
ColumnFamilyDescriptor columnFamily);
/**
* Create a new namespace.
* @param descriptor descriptor which describes the new namespace
*/
CompletableFuture<Void> createNamespace(NamespaceDescriptor descriptor);
/**
* Modify an existing namespace.
* @param descriptor descriptor which describes the new namespace
*/
CompletableFuture<Void> modifyNamespace(NamespaceDescriptor descriptor);
/**
* Delete an existing namespace. Only empty namespaces (no tables) can be removed.
* @param name namespace name
*/
CompletableFuture<Void> deleteNamespace(String name);
/**
* Get a namespace descriptor by name
* @param name name of namespace descriptor
* @return A descriptor wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(String name);
/**
* List available namespace descriptors
* @return List of descriptors wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<List<NamespaceDescriptor>> listNamespaceDescriptors();
/**
* Get all the online regions on a region server.
*/
CompletableFuture<List<RegionInfo>> getRegions(ServerName serverName);
/**
* Get the regions of a given table.
*/
CompletableFuture<List<RegionInfo>> getRegions(TableName tableName);
/**
* Flush a table.
* @param tableName table to flush
*/
CompletableFuture<Void> flush(TableName tableName);
/**
* Flush an individual region.
* @param regionName region to flush
*/
CompletableFuture<Void> flushRegion(byte[] regionName);
/**
* Flush all regions on the region server.
* @param serverName server to flush
*/
CompletableFuture<Void> flushRegionServer(ServerName serverName);
/**
* Compact a table. When the returned CompletableFuture is done, it only means the compact request
* was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to compact
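* <p>
* For example, to request a compaction and log once the request has been submitted
* (a sketch; completion of the future does not mean the compaction itself has finished):
* <pre>{@code
* admin.compact(TableName.valueOf("my_table"))
*     .thenRun(() -> System.out.println("compaction requested"));
* }</pre>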
*/
default CompletableFuture<Void> compact(TableName tableName) {
return compact(tableName, CompactType.NORMAL);
}
/**
* Compact a column family within a table. When the returned CompletableFuture is done, it only
* means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param tableName table to compact
* @param columnFamily column family within a table. If not present, compact all of the table's
* column families.
*/
default CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily) {
return compact(tableName, columnFamily, CompactType.NORMAL);
}
/**
* Compact a table. When the returned CompletableFuture is done, it only means the compact request
* was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
*/
CompletableFuture<Void> compact(TableName tableName, CompactType compactType);
/**
* Compact a column family within a table. When the returned CompletableFuture is done, it only
* means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
*/
CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily,
CompactType compactType);
/**
* Compact an individual region. When the returned CompletableFuture is done, it only means the
* compact request was sent to HBase and may need some time to finish the compact operation.
* @param regionName region to compact
*/
CompletableFuture<Void> compactRegion(byte[] regionName);
/**
* Compact a column family within a region. When the returned CompletableFuture is done, it only
* means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param regionName region to compact
* @param columnFamily column family within a region. If not present, compact all of the region's
* column families.
*/
CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily);
/**
* Major compact a table. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to major compact
*/
default CompletableFuture<Void> majorCompact(TableName tableName) {
return majorCompact(tableName, CompactType.NORMAL);
}
/**
* Major compact a column family within a table. When the returned CompletableFuture is done, it
* only means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param tableName table to major compact
* @param columnFamily column family within a table. If not present, major compact all of the table's
* column families.
*/
default CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily) {
return majorCompact(tableName, columnFamily, CompactType.NORMAL);
}
/**
* Major compact a table. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param tableName table to major compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
*/
CompletableFuture<Void> majorCompact(TableName tableName, CompactType compactType);
/**
* Major compact a column family within a table. When the returned CompletableFuture is done, it
* only means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param tableName table to major compact
* @param columnFamily column family within a table. If not present, major compact all of the table's
* column families.
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
*/
CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily,
CompactType compactType);
/**
* Major compact a region. When the returned CompletableFuture is done, it only means the compact
* request was sent to HBase and may need some time to finish the compact operation.
* @param regionName region to major compact
*/
CompletableFuture<Void> majorCompactRegion(byte[] regionName);
/**
* Major compact a column family within region. When the returned CompletableFuture is done, it
* only means the compact request was sent to HBase and may need some time to finish the compact
* operation.
* @param regionName region to major compact
* @param columnFamily column family within a region. If not present, major compact all of the
* region's column families.
*/
CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily);
/**
* Compact all regions on the region server.
* @param serverName the region server name
*/
CompletableFuture<Void> compactRegionServer(ServerName serverName);
/**
* Major compact all regions on the region server.
* @param serverName the region server name
*/
CompletableFuture<Void> majorCompactRegionServer(ServerName serverName);
/**
* Turn the Merge switch on or off.
* @param on true to turn the merge switch on, false to turn it off
* @return Previous switch value wrapped by a {@link CompletableFuture}
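* <p>
* For example, to turn region merges off and capture the previous setting (a sketch,
* assuming an existing {@code AsyncAdmin} named {@code admin}):
* <pre>{@code
* admin.mergeSwitch(false)
*     .thenAccept(prev -> System.out.println("merge switch was previously " + prev));
* }</pre>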
*/
CompletableFuture<Boolean> mergeSwitch(boolean on);
/**
* Query the current state of the Merge switch.
* @return true if the switch is on, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<Boolean> isMergeEnabled();
/**
* Turn the Split switch on or off.
* @param on true to turn the split switch on, false to turn it off
* @return Previous switch value wrapped by a {@link CompletableFuture}
*/
CompletableFuture<Boolean> splitSwitch(boolean on);
/**
* Query the current state of the Split switch.
* @return true if the switch is on, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<Boolean> isSplitEnabled();
/**
* Merge two regions.
* @param nameOfRegionA encoded or full name of region a
* @param nameOfRegionB encoded or full name of region b
* @param forcible true to do a compulsory merge; otherwise only two adjacent regions will be
* merged
*/
CompletableFuture<Void> mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB,
boolean forcible);
/**
* Split a table. The method will execute split action for each region in table.
* @param tableName table to split
*/
CompletableFuture<Void> split(TableName tableName);
/**
* Split an individual region.
* @param regionName region to split
*/
CompletableFuture<Void> splitRegion(byte[] regionName);
/**
* Split a table.
* @param tableName table to split
* @param splitPoint the explicit position to split on
*/
CompletableFuture<Void> split(TableName tableName, byte[] splitPoint);
/**
* Split an individual region.
* @param regionName region to split
* @param splitPoint the explicit position to split on. If not present, the region server will
* choose the split point.
*/
CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint);
/**
* @param regionName Encoded or full name of region to assign.
*/
CompletableFuture<Void> assign(byte[] regionName);
/**
* Unassign a region from current hosting regionserver. Region will then be assigned to a
* regionserver chosen at random. Region could be reassigned back to the same server. Use
* {@link #move(byte[], ServerName)} if you want to control the region movement.
* @param regionName Encoded or full name of region to unassign. Will clear any existing
* RegionPlan if one found.
* @param forcible If true, force unassign (Will remove region from regions-in-transition too if
* present. If results in double assignment use hbck -fix to resolve. To be used by
* experts).
*/
CompletableFuture<Void> unassign(byte[] regionName, boolean forcible);
/**
* Offline specified region from master's in-memory state. It will not attempt to reassign the
* region as in unassign. This API can be used when a region is not served by any region server
* but is still online as per the master's in-memory state. If this API is incorrectly used on an
* active region then the master will lose track of it. This is a special method that should be used by
* experts or hbck.
* @param regionName Encoded or full name of region to offline
*/
CompletableFuture<Void> offline(byte[] regionName);
/**
* Move the region to a random server.
* @param regionName Encoded or full name of region to move.
*/
CompletableFuture<Void> move(byte[] regionName);
/**
* Move the region to the destination server.
* @param regionName Encoded or full name of region to move.
* @param destServerName The servername of the destination regionserver. If not present, we'll
* assign to a random server. A server name is made of host, port and startcode. Here is
* an example: host187.example.com,60020,1289493121758
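* <p>
* A sketch of moving a region to a specific server (the encoded region name and server name
* below are illustrative):
* <pre>{@code
* admin.move(Bytes.toBytes("0123456789abcdef0123456789abcdef"),
*     ServerName.valueOf("host187.example.com,60020,1289493121758")).join();
* }</pre>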
*/
CompletableFuture<Void> move(byte[] regionName, ServerName destServerName);
/**
* Apply the new quota settings.
* @param quota the quota settings
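* <p>
* For example, to throttle a table to 1000 requests per second (a sketch using
* {@code QuotaSettingsFactory}; exact factory method names may vary by HBase version):
* <pre>{@code
* QuotaSettings quota = QuotaSettingsFactory.throttleTable(
*     TableName.valueOf("my_table"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS);
* admin.setQuota(quota).join();
* }</pre>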
*/
CompletableFuture<Void> setQuota(QuotaSettings quota);
/**
* List the quotas based on the filter.
* @param filter the quota settings filter
* @return the QuotaSettings list, wrapped by a CompletableFuture.
*/
CompletableFuture<List<QuotaSettings>> getQuota(QuotaFilter filter);
/**
* Add a new replication peer for replicating data to slave cluster
* @param peerId a short name that identifies the peer
* @param peerConfig configuration for the replication slave cluster
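* <p>
* A minimal sketch (assumes the builder-style {@code ReplicationPeerConfig} API; the peer id
* and cluster key are illustrative):
* <pre>{@code
* ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
*     .setClusterKey("zk1,zk2,zk3:2181:/hbase")
*     .build();
* admin.addReplicationPeer("peer_1", peerConfig).join();
* }</pre>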
*/
default CompletableFuture<Void> addReplicationPeer(String peerId,
ReplicationPeerConfig peerConfig) {
return addReplicationPeer(peerId, peerConfig, true);
}
/**
* Add a new replication peer for replicating data to slave cluster
* @param peerId a short name that identifies the peer
* @param peerConfig configuration for the replication slave cluster
* @param enabled peer state, true if ENABLED and false if DISABLED
*/
CompletableFuture<Void> addReplicationPeer(String peerId,
ReplicationPeerConfig peerConfig, boolean enabled);
/**
* Remove a peer and stop the replication
* @param peerId a short name that identifies the peer
*/
CompletableFuture<Void> removeReplicationPeer(String peerId);
/**
* Restart the replication stream to the specified peer
* @param peerId a short name that identifies the peer
*/
CompletableFuture<Void> enableReplicationPeer(String peerId);
/**
* Stop the replication stream to the specified peer
* @param peerId a short name that identifies the peer
*/
CompletableFuture<Void> disableReplicationPeer(String peerId);
/**
* Returns the configured ReplicationPeerConfig for the specified peer
* @param peerId a short name that identifies the peer
* @return ReplicationPeerConfig for the peer wrapped by a {@link CompletableFuture}.
*/
CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(String peerId);
/**
* Update the peerConfig for the specified peer
* @param peerId a short name that identifies the peer
* @param peerConfig new config for the peer
*/
CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
ReplicationPeerConfig peerConfig);
/**
* Append the replicable table-cf config of the specified peer
* @param peerId a short name that identifies the cluster
* @param tableCfs A map from tableName to column family names
*/
CompletableFuture<Void> appendReplicationPeerTableCFs(String peerId,
Map<TableName, List<String>> tableCfs);
/**
* Remove some table-cfs from config of the specified peer
* @param peerId a short name that identifies the cluster
* @param tableCfs A map from tableName to column family names
*/
CompletableFuture<Void> removeReplicationPeerTableCFs(String peerId,
Map<TableName, List<String>> tableCfs);
/**
* Return a list of replication peers.
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers();
/**
* Return a list of replication peers.
* @param pattern The compiled regular expression to match peer id
* @return a list of replication peers description. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers(Pattern pattern);
/**
* Find all table and column families that are replicated from this cluster
* @return the replicated table-cfs list of this cluster. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
CompletableFuture<List<TableCFs>> listReplicatedTableCFs();
/**
* Enable a table's replication switch.
* @param tableName name of the table
*/
CompletableFuture<Void> enableTableReplication(TableName tableName);
/**
* Disable a table's replication switch.
* @param tableName name of the table
*/
CompletableFuture<Void> disableTableReplication(TableName tableName);
/**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
* taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
* based on the name of the snapshot. Attempts to take a snapshot with the same name (even
* a different type or with different parameters) will fail with a
* {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate
* naming. Snapshot names follow the same naming constraints as tables in HBase. See
* {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
* @param snapshotName name of the snapshot to be created
* @param tableName name of the table for which snapshot is created
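* <p>
* For example, to snapshot a table and then clone the snapshot into a new table (a sketch;
* the snapshot and table names are illustrative):
* <pre>{@code
* admin.snapshot("my_snapshot", TableName.valueOf("my_table"))
*     .thenCompose(ignored -> admin.cloneSnapshot("my_snapshot", TableName.valueOf("my_table_copy")))
*     .join();
* }</pre>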
*/
default CompletableFuture<Void> snapshot(String snapshotName, TableName tableName) {
return snapshot(snapshotName, tableName, SnapshotType.FLUSH);
}
/**
* Create typed snapshot of the table. Snapshots are considered unique based on the name of the
* snapshot. Attempts to take a snapshot with the same name (even a different type or with
* different parameters) will fail with a
* {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} indicating the duplicate
* naming. Snapshot names follow the same naming constraints as tables in HBase. See
* {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
* @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
* snapshots stored on the cluster
* @param tableName name of the table to snapshot
* @param type type of snapshot to take
*/
default CompletableFuture<Void> snapshot(String snapshotName, TableName tableName,
SnapshotType type) {
return snapshot(new SnapshotDescription(snapshotName, tableName, type));
}
/**
* Take a snapshot and wait for the server to complete that snapshot asynchronously. Only a single
* snapshot should be taken at a time for an instance of HBase, or results may be undefined (you
* can tell multiple HBase clusters to snapshot at the same time, but only one at a time for a
* single cluster). Snapshots are considered unique based on the name of the snapshot.
* Attempts to take a snapshot with the same name (even a different type or with different
* parameters) will fail with a {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException}
* indicating the duplicate naming. Snapshot names follow the same naming constraints as tables in
* HBase. See {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
* You should probably use {@link #snapshot(String, org.apache.hadoop.hbase.TableName)} unless you
* are sure about the type of snapshot that you want to take.
* @param snapshot snapshot to take
*/
CompletableFuture<Void> snapshot(SnapshotDescription snapshot);
/**
* Check the current state of the passed snapshot. There are three possible states:
* <ol>
* <li>running - returns false</li>
* <li>finished - returns true</li>
* <li>finished with error - throws the exception that caused the snapshot to fail</li>
* </ol>
* The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
* run/started since the snapshot you are checking, you will receive an
* {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}.
* @param snapshot description of the snapshot to check
* @return true if the snapshot is completed, false if the snapshot is still
* running
*/
CompletableFuture<Boolean> isSnapshotFinished(SnapshotDescription snapshot);
/**
* Restore the specified snapshot on the original table. (The table must be disabled) If the
* "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
* snapshot of the current table is taken before executing the restore operation. In case of
* restore failure, the failsafe snapshot will be restored. If the restore completes without
* problem the failsafe snapshot is deleted.
* @param snapshotName name of the snapshot to restore
*/
CompletableFuture<Void> restoreSnapshot(String snapshotName);
/**
* Restore the specified snapshot on the original table. (The table must be disabled) If
* 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
* executing the restore operation. In case of restore failure, the failsafe snapshot will be
* restored. If the restore completes without problem the failsafe snapshot is deleted. The
* failsafe snapshot name is configurable by using the property
* "hbase.snapshot.restore.failsafe.name".
* @param snapshotName name of the snapshot to restore
* @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
*/
CompletableFuture<Void> restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot);
/**
* Create a new table by cloning the snapshot content.
* @param snapshotName name of the snapshot to be cloned
* @param tableName name of the table where the snapshot will be restored
*/
CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName);
/**
* List completed snapshots.
* @return a list of snapshot descriptors for completed snapshots wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<List<SnapshotDescription>> listSnapshots();
/**
* List all the completed snapshots matching the given pattern.
* @param pattern The compiled regular expression to match against
* @return - returns a List of SnapshotDescription wrapped by a {@link CompletableFuture}
*/
CompletableFuture<List<SnapshotDescription>> listSnapshots(Pattern pattern);
/**
* List all the completed snapshots matching the given table name pattern.
* @param tableNamePattern The compiled table name regular expression to match against
* @return - returns a List of completed SnapshotDescription wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern);
/**
* List all the completed snapshots matching the given table name regular expression and snapshot
* name regular expression.
* @param tableNamePattern The compiled table name regular expression to match against
* @param snapshotNamePattern The compiled snapshot name regular expression to match against
* @return - returns a List of completed SnapshotDescription wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<List<SnapshotDescription>> listTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern);
/**
* Delete an existing snapshot.
* @param snapshotName name of the snapshot
*/
CompletableFuture<Void> deleteSnapshot(String snapshotName);
/**
* Delete all existing snapshots.
*/
CompletableFuture<Void> deleteSnapshots();
/**
* Delete existing snapshots whose names match the pattern passed.
* @param pattern pattern for names of the snapshot to match
*/
CompletableFuture<Void> deleteSnapshots(Pattern pattern);
/**
* Delete all existing snapshots matching the given table name pattern.
* @param tableNamePattern The compiled table name regular expression to match against
*/
CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern);
/**
* Delete all existing snapshots matching the given table name regular expression and snapshot
* name regular expression.
* @param tableNamePattern The compiled table name regular expression to match against
* @param snapshotNamePattern The compiled snapshot name regular expression to match against
*/
CompletableFuture<Void> deleteTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern);
/**
* Execute a distributed procedure on a cluster.
* @param signature A distributed procedure is uniquely identified by its signature (default the
* root ZK node name of the procedure).
* @param instance The instance name of the procedure. For some procedures, this parameter is
* optional.
* @param props Property/Value pairs of properties passed to the procedure
*/
CompletableFuture<Void> execProcedure(String signature, String instance,
Map<String, String> props);
/**
* Execute a distributed procedure on a cluster.
* @param signature A distributed procedure is uniquely identified by its signature (default the
* root ZK node name of the procedure).
* @param instance The instance name of the procedure. For some procedures, this parameter is
* optional.
* @param props Property/Value pairs of properties passed to the procedure
* @return data returned after procedure execution. null if no return data.
*/
CompletableFuture<byte[]> execProcedureWithReturn(String signature, String instance,
Map<String, String> props);
/**
* Check the current state of the specified procedure. There are three possible states:
* <ol>
* <li>running - returns false</li>
* <li>finished - returns true</li>
* <li>finished with error - throws the exception that caused the procedure to fail</li>
* </ol>
* @param signature The signature that uniquely identifies a procedure
* @param instance The instance name of the procedure
* @param props Property/Value pairs of properties passed to the procedure
* @return true if the specified procedure is finished successfully, false if it is still running.
* The value is wrapped by {@link CompletableFuture}
*/
CompletableFuture<Boolean> isProcedureFinished(String signature, String instance,
Map<String, String> props);
/**
* Abort a procedure.
* Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2.
* @param procId ID of the procedure to abort
* @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
* @return true if aborted, false if procedure already completed or does not exist. the value is
* wrapped by {@link CompletableFuture}
* @deprecated Since 2.1.1 -- to be removed.
*/
@Deprecated
CompletableFuture<Boolean> abortProcedure(long procId, boolean mayInterruptIfRunning);
/**
* List procedures
* @return procedure list JSON wrapped by {@link CompletableFuture}
*/
CompletableFuture<String> getProcedures();
/**
* List locks.
* @return lock list JSON wrapped by {@link CompletableFuture}
*/
CompletableFuture<String> getLocks();
/**
* Mark region server(s) as decommissioned to prevent additional regions from getting
* assigned to them. Optionally unload the regions on the servers. If there are multiple servers
* to be decommissioned, decommissioning them at the same time can prevent wasteful region
* movements. Region unloading is asynchronous.
* @param servers The list of servers to decommission.
* @param offload True to offload the regions from the decommissioned servers
*/
CompletableFuture<Void> decommissionRegionServers(List<ServerName> servers, boolean offload);
/**
* List region servers marked as decommissioned, which cannot be assigned regions.
* @return List of decommissioned region servers wrapped by {@link CompletableFuture}
*/
CompletableFuture<List<ServerName>> listDecommissionedRegionServers();
/**
* Remove the decommission marker from a region server to allow region assignments. Load regions onto
* the server if a list of regions is given. Region loading is asynchronous.
* @param server The server to recommission.
* @param encodedRegionNames Regions to load onto the server.
*/
CompletableFuture<Void> recommissionRegionServer(ServerName server,
List<byte[]> encodedRegionNames);
/**
* @return cluster status wrapped by {@link CompletableFuture}
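* <p>
* For example, to print the number of live region servers (a sketch, assuming an existing
* {@code AsyncAdmin} named {@code admin}):
* <pre>{@code
* admin.getClusterMetrics()
*     .thenAccept(metrics -> System.out.println(
*         "live servers: " + metrics.getLiveServerMetrics().size()));
* }</pre>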
*/
CompletableFuture<ClusterMetrics> getClusterMetrics();
/**
* @return cluster status wrapped by {@link CompletableFuture}
*/
CompletableFuture<ClusterMetrics> getClusterMetrics(EnumSet<Option>