/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
/**
 * Utility class to help manage {@link SnapshotDescription SnapshotDescriptions}.
 * <p>
 * Snapshots are laid out on disk like this:
 *
 * <pre>
 * /hbase/.snapshots
 *          /.tmp                <---- working directory
 *          /[snapshot name]     <----- completed snapshot
 * </pre>
 *
 * A completed snapshot named 'completed' then looks like this (multiple regions, servers, files,
 * etc. signified by '...' on the same directory depth):
 *
 * <pre>
 * /hbase/.snapshots/completed
 *                   .snapshotinfo          <--- Description of the snapshot
 *                   .tableinfo             <--- Copy of the tableinfo
 *                   /.logs
 *                       /[server_name]
 *                           /... [log files]
 *                       ...
 *                   /[region name]         <---- All the region's information
 *                       .regioninfo        <---- Copy of the HRegionInfo
 *                       /[column family name]
 *                           /[hfile name]  <--- name of the hfile in the real region
 *                           ...
 *                       ...
 *                   ...
 * </pre>
 *
 * Utility methods in this class are useful for getting the correct locations for different parts
 * of the snapshot, as well as moving completed snapshots into place (see
 * {@link #completeSnapshot}), and writing the {@link SnapshotDescription} to the working
 * snapshot directory.
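 * <p>
 * A minimal usage sketch (the configuration, root directory and snapshot name below are
 * illustrative assumptions, not values defined by this class):
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();   // assumed cluster configuration
 * Path rootDir = FSUtils.getRootDir(conf);
 * // where the finished snapshot will live
 * Path completedDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("my_snapshot", rootDir);
 * // where the snapshot is assembled before being moved into place
 * Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir("my_snapshot", rootDir, conf);
 * }</pre>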
*/
@InterfaceAudience.Private
public final class SnapshotDescriptionUtils {
/**
* Filter that only accepts completed snapshot directories
*/
public static class CompletedSnaphotDirectoriesFilter extends FSUtils.BlackListDirFilter {
/**
 * @param fs {@link FileSystem} the snapshot directories live on
*/
public CompletedSnaphotDirectoriesFilter(FileSystem fs) {
super(fs, Collections.singletonList(SNAPSHOT_TMP_DIR_NAME));
}
}
private static final Log LOG = LogFactory.getLog(SnapshotDescriptionUtils.class);
/**
* Version of the fs layout for a snapshot. Future snapshots may have different file layouts,
* which we may need to read in differently.
*/
public static final int SNAPSHOT_LAYOUT_VERSION = SnapshotManifestV2.DESCRIPTOR_VERSION;
// snapshot directory constants
/**
 * Name of the file that holds a snapshot's basic metadata; it is stored directly under the
 * snapshot's directory.
*/
public static final String SNAPSHOTINFO_FILE = ".snapshotinfo";
/** Temporary directory under the snapshot directory to store in-progress snapshots */
public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";
/**
* The configuration property that determines the filepath of the snapshot
* base working directory
*/
public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";
// snapshot operation values
/** Default value if no start time is specified */
public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0;
// Default value if no ttl is specified for Snapshot
private static final long NO_SNAPSHOT_TTL_SPECIFIED = 0;
public static final String MASTER_SNAPSHOT_TIMEOUT_MILLIS = "hbase.snapshot.master.timeout.millis";
/** By default, wait 300 seconds for a snapshot to complete */
public static final long DEFAULT_MAX_WAIT_TIME = 60000 * 5;
/**
 * Default timeout (ms) to wait for a snapshot to complete.
 * @deprecated Use {@link #DEFAULT_MAX_WAIT_TIME} instead.
 */
@Deprecated
public static final int SNAPSHOT_TIMEOUT_MILLIS_DEFAULT = 60000 * 5;
/**
* Conf key for # of ms elapsed before injecting a snapshot timeout error when waiting for
* completion.
* @deprecated Use {@link #MASTER_SNAPSHOT_TIMEOUT_MILLIS} instead.
*/
@Deprecated
public static final String SNAPSHOT_TIMEOUT_MILLIS_KEY = "hbase.snapshot.master.timeoutMillis";
private SnapshotDescriptionUtils() {
// private constructor for utility class
}
/**
* @param conf {@link Configuration} from which to check for the timeout
* @param type type of snapshot being taken
* @param defaultMaxWaitTime Default amount of time to wait, if none is in the configuration
* @return the max amount of time the master should wait for a snapshot to complete
*/
public static long getMaxMasterTimeout(Configuration conf, SnapshotDescription.Type type,
long defaultMaxWaitTime) {
String confKey;
switch (type) {
case DISABLED:
default:
confKey = MASTER_SNAPSHOT_TIMEOUT_MILLIS;
}
return Math.max(conf.getLong(confKey, defaultMaxWaitTime),
conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, defaultMaxWaitTime));
}
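  // Illustrative sketch (the conf variable is an assumption, not defined here): resolving the
  // effective master-side timeout for a disabled-table snapshot, honoring both the current and
  // the deprecated configuration keys.
  //
  //   long timeoutMillis = SnapshotDescriptionUtils.getMaxMasterTimeout(conf,
  //       SnapshotDescription.Type.DISABLED, SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME);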
/**
 * Get the snapshot root directory. All the snapshots are kept under this directory, i.e.
 * ${hbase.rootdir}/{@link HConstants#SNAPSHOT_DIR_NAME}
* @param rootDir hbase root directory
* @return the base directory in which all snapshots are kept
*/
public static Path getSnapshotRootDir(final Path rootDir) {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
}
/**
* Get the directory for a specified snapshot. This directory is a sub-directory of snapshot root
* directory and all the data files for a snapshot are kept under this directory.
* @param snapshot snapshot being taken
* @param rootDir hbase root directory
* @return the final directory for the completed snapshot
*/
public static Path getCompletedSnapshotDir(final SnapshotDescription snapshot, final Path rootDir) {
return getCompletedSnapshotDir(snapshot.getName(), rootDir);
}
/**
* Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root
* directory and all the data files for a snapshot are kept under this directory.
* @param snapshotName name of the snapshot being taken
* @param rootDir hbase root directory
* @return the final directory for the completed snapshot
*/
public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
}
/**
* Get the general working directory for snapshots - where they are built, where they are
* temporarily copied on export, etc.
* @param rootDir root directory of the HBase installation
* @param conf Configuration of the HBase instance
* @return Path to the snapshot tmp directory, relative to the passed root directory
*/
public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) {
return new Path(conf.get(SNAPSHOT_WORKING_DIR,
getDefaultWorkingSnapshotDir(rootDir).toString()));
}
/**
* Get the directory to build a snapshot, before it is finalized
* @param snapshot snapshot that will be built
* @param rootDir root directory of the hbase installation
* @param conf Configuration of the HBase instance
* @return {@link Path} where one can build a snapshot
*/
public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir,
Configuration conf) {
return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf);
}
/**
* Get the directory to build a snapshot, before it is finalized
* @param snapshotName name of the snapshot
* @param rootDir root directory of the hbase installation
* @param conf Configuration of the HBase instance
* @return {@link Path} where one can build a snapshot
*/
public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir,
Configuration conf) {
return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
}
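  // Illustrative sketch (the URI and snapshot name are assumptions): by default the working
  // directory lives under <snapshots dir>/.tmp, but hbase.snapshot.working.dir can relocate it,
  // e.g. onto a different filesystem; the root directory is then not used for the working path.
  //
  //   conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "hdfs://nn:8020/snapshot-work");
  //   Path working = SnapshotDescriptionUtils.getWorkingSnapshotDir("my_snapshot", rootDir, conf);
  //   // -> hdfs://nn:8020/snapshot-work/my_snapshot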
/**
* Get the directory within the given filepath to store the snapshot instance
* @param snapshotsDir directory to store snapshot directory within
* @param snapshotName name of the snapshot to take
* @return the final directory for the snapshot in the given filepath
*/
private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) {
return new Path(snapshotsDir, snapshotName);
}
/**
* @param rootDir hbase root directory
 * @return the directory for all completed snapshots
*/
public static final Path getSnapshotsDir(Path rootDir) {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
}
/**
* Determines if the given workingDir is a subdirectory of the given "root directory"
* @param workingDir a directory to check
* @param rootDir root directory of the HBase installation
* @return true if the given workingDir is a subdirectory of the given root directory,
* false otherwise
*/
public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) {
return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
}
/**
* Determines if the given workingDir is a subdirectory of the default working snapshot directory
* @param workingDir a directory to check
* @param conf configuration for the HBase cluster
* @return true if the given workingDir is a subdirectory of the default working directory for
* snapshots, false otherwise
*/
public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf)
throws IOException {
Path defaultWorkingDir = getDefaultWorkingSnapshotDir(FSUtils.getRootDir(conf));
return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir);
}
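  // Illustrative examples (paths are assumptions). Note that a trailing separator is appended to
  // the root before the prefix check, so sibling directories sharing a name prefix do not match:
  //
  //   isSubDirectoryOf(new Path("/hbase/.tmp/snap1"), new Path("/hbase"))   -> true
  //   isSubDirectoryOf(new Path("/hbase-backup"),     new Path("/hbase"))   -> false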
/**
* Get the default working directory for snapshots - where they are built, where they are
* temporarily copied on export, etc.
* @param rootDir root directory of the HBase installation
* @return Path to the default snapshot tmp directory, relative to the passed root directory
*/
private static Path getDefaultWorkingSnapshotDir(final Path rootDir) {
return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
}
/**
* Convert the passed snapshot description into a 'full' snapshot description based on default
* parameters, if none have been supplied. This resolves any 'optional' parameters that aren't
* supplied to their default values.
* @param snapshot general snapshot descriptor
* @param conf Configuration to read configured snapshot defaults if snapshot is not complete
* @return a valid snapshot description
* @throws IllegalArgumentException if the {@link SnapshotDescription} is not a complete
* {@link SnapshotDescription}.
*/
public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf)
throws IllegalArgumentException, IOException {
if (!snapshot.hasTable()) {
throw new IllegalArgumentException(
"Descriptor doesn't apply to a table, so we can't build it.");
}
// set the creation time, if one hasn't been set
long time = snapshot.getCreationTime();
if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) {
time = EnvironmentEdgeManager.currentTime();
LOG.debug("Creation time not specified, setting to:" + time + " (current time:"
+ EnvironmentEdgeManager.currentTime() + ").");
SnapshotDescription.Builder builder = snapshot.toBuilder();
builder.setCreationTime(time);
snapshot = builder.build();
}
long ttl = snapshot.getTtl();
// set default ttl(sec) if it is not set already or the value is out of the range
if (ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED ||
ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) {
final long defaultSnapshotTtl = conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY,
HConstants.DEFAULT_SNAPSHOT_TTL);
if (LOG.isDebugEnabled()) {
LOG.debug("Snapshot current TTL value: " + ttl + " resetting it to default value: " +
defaultSnapshotTtl);
}
ttl = defaultSnapshotTtl;
}
SnapshotDescription.Builder builder = snapshot.toBuilder();
builder.setTtl(ttl);
snapshot = builder.build();
// set the acl to snapshot if security feature is enabled.
if (isSecurityAvailable(conf)) {
snapshot = writeAclToSnapshotDescription(snapshot, conf);
}
return snapshot;
}
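  // Illustrative sketch (the snapshot name, table and conf are assumptions): filling in the
  // creation time, TTL and, when security is enabled, the table ACLs of a bare descriptor before
  // the snapshot is taken.
  //
  //   SnapshotDescription desc = SnapshotDescription.newBuilder()
  //       .setName("my_snapshot").setTable("my_table").build();
  //   desc = SnapshotDescriptionUtils.validate(desc, conf);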
/**
* Write the snapshot description into the working directory of a snapshot
* @param snapshot description of the snapshot being taken
* @param workingDir working directory of the snapshot
* @param fs {@link FileSystem} on which the snapshot should be taken
* @throws IOException if we can't reach the filesystem and the file cannot be cleaned up on
* failure
*/
public static void writeSnapshotInfo(SnapshotDescription snapshot, Path workingDir, FileSystem fs)
throws IOException {
FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
HConstants.DATA_FILE_UMASK_KEY);
Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
try {
FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
try {
snapshot.writeTo(out);
} finally {
out.close();
}
} catch (IOException e) {
// if we get an exception, try to remove the snapshot info
if (!fs.delete(snapshotInfo, false)) {
String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
LOG.error(msg);
throw new IOException(msg);
}
}
}
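  // Illustrative sketch (desc, rootDir and conf are assumptions): persisting the descriptor into
  // the working directory so it can later be read back with readSnapshotInfo(FileSystem, Path).
  //
  //   Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
  //   SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, workingDir.getFileSystem(conf));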
/**
* Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
* @param fs filesystem where the snapshot was taken
* @param snapshotDir directory where the snapshot was stored
* @return the stored snapshot description
* @throws CorruptedSnapshotException if the
* snapshot cannot be read
*/
public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
throws CorruptedSnapshotException {
Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
try {
FSDataInputStream in = null;
try {
in = fs.open(snapshotInfo);
SnapshotDescription desc = SnapshotDescription.parseFrom(in);
return desc;
} finally {
if (in != null) in.close();
}
} catch (IOException e) {
throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e);
}
}
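  // Illustrative sketch (rootDir, conf and the snapshot name are assumptions): reading the
  // descriptor of a completed snapshot back from disk.
  //
  //   Path completedDir = SnapshotDescriptionUtils.getCompletedSnapshotDir("my_snapshot", rootDir);
  //   SnapshotDescription desc =
  //       SnapshotDescriptionUtils.readSnapshotInfo(completedDir.getFileSystem(conf), completedDir);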
/**
* Move the finished snapshot to its final, publicly visible directory - this marks the snapshot
* as 'complete'.
* @param snapshotDir The file path of the completed snapshots
* @param workingDir The file path of the in progress snapshots
* @param fs {@link FileSystem} The file system of the completed snapshots
 * @param workingDirFs The file system of the in progress snapshots
 * @param conf Configuration of the HBase instance
* @throws org.apache.hadoop.hbase.snapshot.SnapshotCreationException if the
* snapshot could not be moved
* @throws IOException the filesystem could not be reached
*/
public static void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
FileSystem workingDirFs, Configuration conf) throws SnapshotCreationException, IOException {
LOG.debug("Snapshot is done, just moving the snapshot from " + workingDir + " to "
+ snapshotDir);
if (!workingDirFs.exists(workingDir)) {
throw new IOException("Failed to moving nonexistent snapshot working directory "
+ workingDir + " to " + snapshotDir);
}
// If the working and completed snapshot directory are on the same file system, attempt
// to rename the working snapshot directory to the completed location. If that fails,
// or the file systems differ, attempt to copy the directory over, throwing an exception
// if this fails
URI workingURI = workingDirFs.getUri();
URI rootURI = fs.getUri();
if ((shouldSkipRenameSnapshotDirectories(workingURI, rootURI)
|| !fs.rename(workingDir, snapshotDir))
&& !FileUtil.copy(workingDirFs, workingDir, fs, snapshotDir, true, true, conf)) {
throw new SnapshotCreationException("Failed to copy working directory(" + workingDir
+ ") to completed directory(" + snapshotDir + ").");
}
}
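  // Illustrative sketch (desc, workingDir, rootDir and conf are assumptions): once all manifest
  // data has been written under workingDir, publish the snapshot by renaming it into place, or
  // copying it when the two directories live on different filesystems.
  //
  //   SnapshotDescriptionUtils.completeSnapshot(
  //       SnapshotDescriptionUtils.getCompletedSnapshotDir(desc, rootDir), workingDir,
  //       rootDir.getFileSystem(conf), workingDir.getFileSystem(conf), conf);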
@VisibleForTesting
static boolean shouldSkipRenameSnapshotDirectories(URI workingURI, URI rootURI) {
// check scheme, e.g. file, hdfs
if (workingURI.getScheme() == null &&
(rootURI.getScheme() != null && !rootURI.getScheme().equalsIgnoreCase("file"))) {
return true;
}
if (workingURI.getScheme() != null && !workingURI.getScheme().equals(rootURI.getScheme())) {
return true;
}
// check Authority, e.g. localhost:port
if (workingURI.getAuthority() == null && rootURI.getAuthority() != null) {
return true;
}
if (workingURI.getAuthority() != null &&
!workingURI.getAuthority().equals(rootURI.getAuthority())) {
return true;
}
// check UGI/userInfo
if (workingURI.getUserInfo() == null && rootURI.getUserInfo() != null) {
return true;
}
if (workingURI.getUserInfo() != null &&
!workingURI.getUserInfo().equals(rootURI.getUserInfo())) {
return true;
}
return false;
}
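  // Illustrative examples (URIs are assumptions): a rename is only attempted when scheme,
  // authority and user-info all match; otherwise completeSnapshot falls back to copying.
  //
  //   shouldSkipRenameSnapshotDirectories(URI.create("hdfs://nn1:8020/"), URI.create("hdfs://nn1:8020/")) -> false
  //   shouldSkipRenameSnapshotDirectories(URI.create("hdfs://nn1:8020/"), URI.create("hdfs://nn2:8020/")) -> true
  //   shouldSkipRenameSnapshotDirectories(URI.create("file:///tmp/work"),  URI.create("hdfs://nn1:8020/")) -> true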
/**
* Check if the user is this table snapshot's owner
* @param snapshot the table snapshot description
* @param user the user
* @return true if the user is the owner of the snapshot,
* false otherwise or the snapshot owner field is not present.
*/
public static boolean isSnapshotOwner(final SnapshotDescription snapshot, final User user) {
if (user == null) return false;
if (!snapshot.hasOwner()) return false;
return snapshot.getOwner().equals(user.getShortName());
}
public static boolean isSecurityAvailable(Configuration conf) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
try {
Admin admin = conn.getAdmin();
try {
return admin.tableExists(AccessControlLists.ACL_TABLE_NAME);
} finally {
admin.close();
}
} finally {
conn.close();
}
}
private static SnapshotDescription writeAclToSnapshotDescription(
final SnapshotDescription snapshot, final Configuration conf) throws IOException {
    ListMultimap<String, TablePermission> perms =
        User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, TablePermission>>() {
          @Override
          public ListMultimap<String, TablePermission> run() throws Exception {
            return AccessControlLists.getTablePermissions(conf,
              TableName.valueOf(snapshot.getTable()));
          }
        });
return snapshot.toBuilder().setUsersAndPermissions(ProtobufUtil.toUserTablePermissions(perms))
.build();
}
}