org.apache.hadoop.hdfs.server.diskbalancer.command.Command
Shaded version of Apache Hadoop for Presto (hadoop-apache2).
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.diskbalancer.command;
import com.facebook.presto.hadoop.$internal.com.fasterxml.jackson.databind.ObjectMapper;
import com.facebook.presto.hadoop.$internal.com.fasterxml.jackson.databind.ObjectReader;
import com.facebook.presto.hadoop.$internal.com.google.common.annotations.VisibleForTesting;
import com.facebook.presto.hadoop.$internal.com.google.common.base.Preconditions;
import com.facebook.presto.hadoop.$internal.com.google.common.collect.Lists;
import com.facebook.presto.hadoop.$internal.org.apache.commons.cli.CommandLine;
import com.facebook.presto.hadoop.$internal.org.apache.commons.cli.Option;
import com.facebook.presto.hadoop.$internal.org.apache.commons.lang3.StringUtils;
import com.facebook.presto.hadoop.$internal.org.apache.commons.text.TextStringBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.HostsFileReader;
import com.facebook.presto.hadoop.$internal.org.slf4j.Logger;
import com.facebook.presto.hadoop.$internal.org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
/**
* Common interface for command handling.
*/
public abstract class Command extends Configured implements Closeable {
private static final ObjectReader READER =
new ObjectMapper().readerFor(HashMap.class);
static final Logger LOG = LoggerFactory.getLogger(Command.class);
private Map<String, String> validArgs = new HashMap<>();
private URI clusterURI;
private FileSystem fs = null;
private DiskBalancerCluster cluster = null;
private int topNodes;
private PrintStream ps;
private static final Path DEFAULT_LOG_DIR = new Path("/system/diskbalancer");
private Path diskBalancerLogs;
/**
* Constructs a command.
*/
public Command(Configuration conf) {
this(conf, System.out);
}
/**
* Constructs a command.
*/
public Command(Configuration conf, final PrintStream ps) {
super(conf);
// These arguments are valid for all commands.
topNodes = 0;
this.ps = ps;
}
/**
* Cleans up any resources held by this command.
*
* The main goal is to delete the id file created in
* {@link org.apache.hadoop.hdfs.server.balancer
* .NameNodeConnector#checkAndMarkRunning};
* otherwise it is not possible to run multiple commands in a row.
*
*/
@Override
public void close() throws IOException {
if (fs != null) {
fs.close();
}
}
/**
* Gets the print stream used for output.
* @return print stream
*/
PrintStream getPrintStream() {
return ps;
}
/**
* Executes the client calls.
*
* @param cmd - CommandLine
* @throws Exception
*/
public abstract void execute(CommandLine cmd) throws Exception;
/**
* Gets extended help for this command.
*/
public abstract void printHelp();
/**
* Processes the URI and returns the cluster with its nodes set up. This is
* used by all commands.
*
* @param cmd - CommandLine
* @return DiskBalancerCluster
* @throws Exception
*/
protected DiskBalancerCluster readClusterInfo(CommandLine cmd) throws
Exception {
Preconditions.checkNotNull(cmd);
setClusterURI(FileSystem.getDefaultUri(getConf()));
LOG.debug("using name node URI : {}", this.getClusterURI());
ClusterConnector connector = ConnectorFactory.getCluster(this.clusterURI,
getConf());
cluster = new DiskBalancerCluster(connector);
LOG.debug("Reading cluster info");
cluster.readClusterInfo();
return cluster;
}
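/*
* Illustrative sketch, not part of the original class: a hypothetical concrete
* command would typically verify its options, read the cluster and then pick
* the nodes to operate on from an option value (the "report" command name and
* the "node" option below are only examples):
*
*   public void execute(CommandLine cmd) throws Exception {
*     verifyCommandOptions("report", cmd);
*     DiskBalancerCluster diskCluster = readClusterInfo(cmd);
*     setNodesToProcess(getNodes(cmd.getOptionValue("node")));
*     // command-specific work on diskCluster and the selected nodes follows
*   }
*/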
/**
* Sets up the output path.
*
* @param path - Path or null to use default path.
* @throws IOException
*/
protected void setOutputPath(String path) throws IOException {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MMM-dd-HH-mm-ss");
Date now = new Date();
fs = FileSystem.get(getClusterURI(), getConf());
if (path == null || path.isEmpty()) {
if (getClusterURI().getScheme().startsWith("file")) {
diskBalancerLogs = new Path(
System.getProperty("user.dir") + DEFAULT_LOG_DIR.toString() +
Path.SEPARATOR + format.format(now));
} else {
diskBalancerLogs = new Path(DEFAULT_LOG_DIR.toString() +
Path.SEPARATOR + format.format(now));
}
} else {
diskBalancerLogs = new Path(path);
}
if (fs.exists(diskBalancerLogs)) {
LOG.debug("Another Diskbalancer instance is running ? - Target " +
"Directory already exists. {}", diskBalancerLogs);
throw new IOException("Another DiskBalancer files already exist at the " +
"target location. " + diskBalancerLogs.toString());
}
fs.mkdirs(diskBalancerLogs);
}
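/*
* Example of the resulting layout (dates and paths are illustrative only):
* with no explicit path, an hdfs:// cluster URI produces a log directory such
* as /system/diskbalancer/2016-Mar-09-10-30-12, while a file:// URI prefixes
* the current working directory, e.g.
* /home/user/system/diskbalancer/2016-Mar-09-10-30-12.
*/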
/**
* Sets the nodes to process.
*
* @param node - Node
*/
protected void setNodesToProcess(DiskBalancerDataNode node) {
List<DiskBalancerDataNode> nodelist = new LinkedList<>();
nodelist.add(node);
setNodesToProcess(nodelist);
}
/**
* Sets the list of Nodes to process.
*
* @param nodes Nodes.
*/
protected void setNodesToProcess(List<DiskBalancerDataNode> nodes) {
if (cluster == null) {
throw new IllegalStateException("Set nodes to process invoked before " +
"initializing cluster. Illegal usage.");
}
cluster.setNodesToProcess(nodes);
}
/**
* Returns a DiskBalancer Node from the Cluster or null if not found.
*
* @param nodeName - can be the hostname, IP address or UUID of the node.
* @return - DataNode if found.
*/
DiskBalancerDataNode getNode(String nodeName) {
DiskBalancerDataNode node = null;
if (nodeName == null || nodeName.isEmpty()) {
return node;
}
if (cluster.getNodes().size() == 0) {
return node;
}
node = cluster.getNodeByName(nodeName);
if (node != null) {
return node;
}
node = cluster.getNodeByIPAddress(nodeName);
if (node != null) {
return node;
}
node = cluster.getNodeByUUID(nodeName);
return node;
}
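/*
* For illustration (hypothetical identifiers): getNode("dn1.example.com"),
* getNode("10.0.0.12") and getNode("1df5b85c-...") would all resolve the same
* DataNode, because the lookup falls back from hostname to IP address to UUID
* in that order.
*/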
/**
* Gets the node set from a file or a string.
*
* @param listArg - String File URL or a comma separated list of node names.
* @return Set of node names
* @throws IOException
*/
protected Set<String> getNodeList(String listArg) throws IOException {
URL listURL;
String nodeData;
Set<String> resultSet = new TreeSet<>();
if ((listArg == null) || listArg.isEmpty()) {
return resultSet;
}
if (listArg.startsWith("file://")) {
listURL = new URL(listArg);
try {
HostsFileReader.readFileToSet("include",
Paths.get(listURL.getPath()).toString(), resultSet);
} catch (FileNotFoundException e) {
String warnMsg = String
.format("The input host file path '%s' is not a valid path. "
+ "Please make sure the host file exists.", listArg);
throw new DiskBalancerException(warnMsg,
DiskBalancerException.Result.INVALID_HOST_FILE_PATH);
}
} else {
nodeData = listArg;
String[] nodes = nodeData.split(",");
if (nodes.length == 0) {
String warnMsg = "The number of input nodes is 0. "
+ "Please input the valid nodes.";
throw new DiskBalancerException(warnMsg,
DiskBalancerException.Result.INVALID_NODE);
}
Collections.addAll(resultSet, nodes);
}
return resultSet;
}
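/*
* Example inputs (hypothetical values): the argument may point at a host file,
* e.g. "file:///tmp/datanodes.txt" read via HostsFileReader, or be an inline
* comma separated list such as "dn1.example.com,dn2.example.com".
*/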
/**
* Returns the DiskBalancer nodes from the cluster that match the given
* argument, or an empty list if the argument is empty.
*
* @param listArg String File URL or a comma separated list of node names.
* @return List of DiskBalancer Node
* @throws IOException
*/
protected List<DiskBalancerDataNode> getNodes(String listArg)
throws IOException {
Set<String> nodeNames = null;
List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
List<String> invalidNodeList = Lists.newArrayList();
if ((listArg == null) || listArg.isEmpty()) {
return nodeList;
}
nodeNames = getNodeList(listArg);
DiskBalancerDataNode node = null;
if (!nodeNames.isEmpty()) {
for (String name : nodeNames) {
node = getNode(name);
if (node != null) {
nodeList.add(node);
} else {
invalidNodeList.add(name);
}
}
}
if (!invalidNodeList.isEmpty()) {
String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
String warnMsg = String.format(
"The node(s) '%s' not found. "
+ "Please make sure that '%s' exists in the cluster.",
invalidNodes, invalidNodes);
throw new DiskBalancerException(warnMsg,
DiskBalancerException.Result.INVALID_NODE);
}
return nodeList;
}
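/*
* Illustrative behaviour (hypothetical host names): getNodes("dn1,dn2")
* returns the two matching DiskBalancerDataNode objects; if the list also
* names a host that is not in the cluster, a DiskBalancerException with
* result INVALID_NODE is thrown instead of a partial list.
*/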
/**
* Verifies if the command line options are sane.
*
* @param commandName - Name of the command
* @param cmd - Parsed Command Line
*/
protected void verifyCommandOptions(String commandName, CommandLine cmd) {
@SuppressWarnings("unchecked")
Iterator