/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.metastore;
import static org.apache.commons.lang.StringUtils.join;
import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedDbName;
import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
import java.io.IOException;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.URI;
import java.nio.ByteBuffer;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLIntegrityConstraintViolationException;
import java.sql.Statement;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
import javax.jdo.JDOCanRetryException;
import javax.jdo.JDODataStoreException;
import javax.jdo.JDOException;
import javax.jdo.JDOHelper;
import javax.jdo.JDOObjectNotFoundException;
import javax.jdo.PersistenceManager;
import javax.jdo.PersistenceManagerFactory;
import javax.jdo.Query;
import javax.jdo.Transaction;
import javax.jdo.datastore.DataStoreCache;
import javax.jdo.datastore.JDOConnection;
import javax.jdo.identity.IntIdentity;
import javax.sql.DataSource;
import com.google.common.base.Strings;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.FunctionType;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.ISchemaName;
import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.api.ResourceType;
import org.apache.hadoop.hive.metastore.api.ResourceUri;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
import org.apache.hadoop.hive.metastore.api.RuntimeStat;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
import org.apache.hadoop.hive.metastore.api.SchemaType;
import org.apache.hadoop.hive.metastore.api.SchemaValidation;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;
import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SerdeType;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TableMeta;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMNullablePool;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
import org.apache.hadoop.hive.metastore.metrics.Metrics;
import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
import org.apache.hadoop.hive.metastore.model.MCatalog;
import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
import org.apache.hadoop.hive.metastore.model.MConstraint;
import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
import org.apache.hadoop.hive.metastore.model.MDatabase;
import org.apache.hadoop.hive.metastore.model.MDelegationToken;
import org.apache.hadoop.hive.metastore.model.MFieldSchema;
import org.apache.hadoop.hive.metastore.model.MFunction;
import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
import org.apache.hadoop.hive.metastore.model.MISchema;
import org.apache.hadoop.hive.metastore.model.MMasterKey;
import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
import org.apache.hadoop.hive.metastore.model.MNotificationLog;
import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
import org.apache.hadoop.hive.metastore.model.MOrder;
import org.apache.hadoop.hive.metastore.model.MPartition;
import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
import org.apache.hadoop.hive.metastore.model.MResourceUri;
import org.apache.hadoop.hive.metastore.model.MRole;
import org.apache.hadoop.hive.metastore.model.MRoleMap;
import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
import org.apache.hadoop.hive.metastore.model.MStringList;
import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
import org.apache.hadoop.hive.metastore.model.MType;
import org.apache.hadoop.hive.metastore.model.MVersionTable;
import org.apache.hadoop.hive.metastore.model.MWMMapping;
import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
import org.apache.hadoop.hive.metastore.model.MWMPool;
import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
import org.apache.hadoop.hive.metastore.model.MWMTrigger;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.utils.ObjectPair;
import org.apache.thrift.TException;
import org.datanucleus.AbstractNucleusContext;
import org.datanucleus.ClassLoaderResolver;
import org.datanucleus.ClassLoaderResolverImpl;
import org.datanucleus.NucleusContext;
import org.datanucleus.api.jdo.JDOPersistenceManager;
import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
import org.datanucleus.store.rdbms.exceptions.MissingTableException;
import org.datanucleus.store.scostore.Store;
import org.datanucleus.util.WeakValueMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
* This class is the interface between the application logic and the database
* store that contains the objects. Refrain from putting any logic in model.M*
* objects or in this file, as the former could be auto-generated and this
* class would need to be made into an interface that can read both from a
* database and a filestore.
*/
public class ObjectStore implements RawStore, Configurable {
private static Properties prop = null;
private static PersistenceManagerFactory pmf = null;
private static boolean forTwoMetastoreTesting = false;
private static final DateTimeFormatter YMDHMS_FORMAT = DateTimeFormatter.ofPattern(
"yyyy_MM_dd_HH_mm_ss");
private static Lock pmfPropLock = new ReentrantLock();
/**
* Verify the schema only once per JVM since the db connection info is static
*/
private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
private static final Logger LOG = LoggerFactory.getLogger(ObjectStore.class);
private enum TXN_STATUS {
NO_STATE, OPEN, COMMITED, ROLLBACK
}
private static final Map<String, Class<?>> PINCLASSMAP;
private static final String HOSTNAME;
private static final String USER;
private static final String JDO_PARAM = ":param";
static {
Map<String, Class<?>> map = new HashMap<>();
map.put("table", MTable.class);
map.put("storagedescriptor", MStorageDescriptor.class);
map.put("serdeinfo", MSerDeInfo.class);
map.put("partition", MPartition.class);
map.put("database", MDatabase.class);
map.put("type", MType.class);
map.put("fieldschema", MFieldSchema.class);
map.put("order", MOrder.class);
PINCLASSMAP = Collections.unmodifiableMap(map);
String hostname = "UNKNOWN";
try {
InetAddress clientAddr = InetAddress.getLocalHost();
hostname = clientAddr.getHostAddress();
} catch (IOException e) {
}
HOSTNAME = hostname;
String user = System.getenv("USER");
USER = StringUtils.defaultString(user, "UNKNOWN");
}
private boolean isInitialized = false;
private PersistenceManager pm = null;
private SQLGenerator sqlGenerator = null;
private MetaStoreDirectSql directSql = null;
private DatabaseProduct dbType = null;
private PartitionExpressionProxy expressionProxy = null;
private Configuration conf;
private volatile int openTrasactionCalls = 0;
private Transaction currentTransaction = null;
private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
private Pattern partitionValidationPattern;
private Counter directSqlErrors;
/**
* An AutoCloseable wrapper around the Query class that passes the Query object
* to the caller and lets the caller release the resources when the
* QueryWrapper goes out of scope.
*/
public static class QueryWrapper implements AutoCloseable {
public Query query;
/**
* Explicitly closes the query object to release the resources
*/
@Override
public void close() {
if (query != null) {
query.closeAll();
query = null;
}
}
}
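// Illustrative usage sketch for QueryWrapper (not part of the original
// source): callers would typically hold the wrapper in a try-with-resources
// block so that close(), and therefore Query.closeAll(), runs even on an
// exception path:
//
//   try (QueryWrapper wrapper = new QueryWrapper()) {
//     wrapper.query = pm.newQuery(MTable.class);
//     // ... execute the query and consume its results ...
//   } // wrapper.close() releases the query resources here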
public ObjectStore() {
}
@Override
public Configuration getConf() {
return conf;
}
/**
* Called whenever this object is instantiated using ReflectionUtils, and also
* on connection retries. In cases of connection retries, conf will usually
* contain modified values.
*/
@Override
@SuppressWarnings("nls")
public void setConf(Configuration conf) {
// Although an instance of ObjectStore is accessed by one thread, there may
// be many threads with ObjectStore instances. So the static variables
// pmf and prop need to be protected with locks.
pmfPropLock.lock();
try {
isInitialized = false;
this.conf = conf;
configureSSL(conf);
Properties propsFromConf = getDataSourceProps(conf);
boolean propsChanged = !propsFromConf.equals(prop);
if (propsChanged) {
if (pmf != null){
clearOutPmfClassLoaderCache(pmf);
if (!forTwoMetastoreTesting) {
// close the underlying connection pool to avoid leaks
pmf.close();
}
}
pmf = null;
prop = null;
}
assert(!isActiveTransaction());
shutdown();
// Always want to re-create pm as we don't know whether it was created by the
// most recent instance of the pmf
pm = null;
directSql = null;
expressionProxy = null;
openTrasactionCalls = 0;
currentTransaction = null;
transactionStatus = TXN_STATUS.NO_STATE;
initialize(propsFromConf);
String partitionValidationRegex =
MetastoreConf.getVar(this.conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
partitionValidationPattern = Pattern.compile(partitionValidationRegex);
} else {
partitionValidationPattern = null;
}
// Note, if metrics have not been initialized this will return null, which means we aren't
// using metrics. Thus we should always check whether this is non-null before using.
MetricRegistry registry = Metrics.getRegistry();
if (registry != null) {
directSqlErrors = Metrics.getOrCreateCounter(MetricsConstants.DIRECTSQL_ERRORS);
}
if (!isInitialized) {
throw new RuntimeException(
"Unable to create persistence manager. Check dss.log for details");
} else {
LOG.info("Initialized ObjectStore");
}
} finally {
pmfPropLock.unlock();
}
}
private ClassLoader classLoader;
{
classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader == null) {
classLoader = ObjectStore.class.getClassLoader();
}
}
@SuppressWarnings("nls")
private void initialize(Properties dsProps) {
int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
long retryInterval = MetastoreConf.getTimeVar(conf,
ConfVars.HMS_HANDLER_INTERVAL, TimeUnit.MILLISECONDS);
int numTries = retryLimit;
while (numTries > 0){
try {
initializeHelper(dsProps);
return; // If we reach here, we succeed.
} catch (Exception e){
numTries--;
boolean retriable = isRetriableException(e);
if ((numTries > 0) && retriable){
LOG.info("Retriable exception while instantiating ObjectStore, retrying. " +
"{} tries left", numTries, e);
try {
Thread.sleep(retryInterval);
} catch (InterruptedException ie) {
// Restore the interrupted status, since we do not want to catch it.
LOG.debug("Interrupted while sleeping before retrying.", ie);
Thread.currentThread().interrupt();
}
// If we're here, we'll proceed down the next while loop iteration.
} else {
// we've reached our limit, throw the last one.
if (retriable){
LOG.warn("Exception retry limit reached, not retrying any longer.",
e);
} else {
LOG.debug("Non-retriable exception during ObjectStore initialize.", e);
}
throw e;
}
}
}
}
private static final Set<Class<? extends Throwable>> retriableExceptionClasses =
new HashSet<>(Arrays.asList(JDOCanRetryException.class));
/**
* Helper function for initialize to determine if we should retry an exception.
* We return true if the exception is of a known retriable type, or if any
* exception in its recursive getCause() chain is of a known retriable type.
*/
private boolean isRetriableException(Throwable e) {
if (e == null){
return false;
}
if (retriableExceptionClasses.contains(e.getClass())){
return true;
}
for (Class<? extends Throwable> c : retriableExceptionClasses){
if (c.isInstance(e)){
return true;
}
}
if (e.getCause() == null){
return false;
}
return isRetriableException(e.getCause());
}
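// Note on isRetriableException above: for example, a JDODataStoreException
// whose cause is a JDOCanRetryException is treated as retriable, because the
// getCause() chain is walked recursively until a known retriable type (or
// null) is found.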
/**
* Private helper that performs the initialization routine, so initialize can
* retry it if it fails.
* @param dsProps
*/
private void initializeHelper(Properties dsProps) {
LOG.info("ObjectStore, initialize called");
prop = dsProps;
pm = getPersistenceManager();
try {
String productName = MetaStoreDirectSql.getProductName(pm);
sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName), conf);
} catch (SQLException e) {
LOG.error("error trying to figure out the database product", e);
throw new RuntimeException(e);
}
isInitialized = pm != null;
if (isInitialized) {
dbType = determineDatabaseProduct();
expressionProxy = createExpressionProxy(conf);
if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) {
String schema = prop.getProperty("javax.jdo.mapping.Schema");
schema = StringUtils.defaultIfBlank(schema, null);
directSql = new MetaStoreDirectSql(pm, conf, schema);
}
}
LOG.debug("RawStore: {}, with PersistenceManager: {}" +
" created in the thread with id: {}", this, pm, Thread.currentThread().getId());
}
private DatabaseProduct determineDatabaseProduct() {
try {
return DatabaseProduct.determineDatabaseProduct(getProductName(pm));
} catch (SQLException e) {
LOG.warn("Cannot determine database product; assuming OTHER", e);
return DatabaseProduct.OTHER;
}
}
private static String getProductName(PersistenceManager pm) {
JDOConnection jdoConn = pm.getDataStoreConnection();
try {
return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName();
} catch (Throwable t) {
LOG.warn("Error retrieving product name", t);
return null;
} finally {
jdoConn.close(); // We must release the connection before we call other pm methods.
}
}
/**
* Creates the proxy used to evaluate expressions. This is here to prevent circular
* dependency - ql -> metastore client <-> metastore server -> ql. If server and
* client are split, this can be removed.
* @param conf Configuration.
* @return The partition expression proxy.
*/
private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);
try {
Class<? extends PartitionExpressionProxy> clazz =
JavaUtils.getClass(className, PartitionExpressionProxy.class);
return JavaUtils.newInstance(clazz, new Class<?>[0], new Object[0]);
} catch (MetaException e) {
LOG.error("Error loading PartitionExpressionProxy", e);
throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
}
}
/**
* Configure the SSL properties of the connection from provided config
* @param conf
*/
private static void configureSSL(Configuration conf) {
// SSL support
String sslPropString = MetastoreConf.getVar(conf, ConfVars.DBACCESS_SSL_PROPS);
if (StringUtils.isNotEmpty(sslPropString)) {
LOG.info("Metastore setting SSL properties of the connection to backend DB");
for (String sslProp : sslPropString.split(",")) {
String[] pair = sslProp.trim().split("=");
if (pair != null && pair.length == 2) {
System.setProperty(pair[0].trim(), pair[1].trim());
} else {
LOG.warn("Invalid metastore property value for {}", ConfVars.DBACCESS_SSL_PROPS);
}
}
}
}
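// Illustrative example for configureSSL above (assumed value, not from the
// original source): the config value parsed there is a comma-separated list
// of key=value pairs, e.g.
//   javax.net.ssl.trustStore=/path/to/truststore,javax.net.ssl.trustStorePassword=secret
// and each pair is promoted to a JVM system property for the DB connection.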
/**
* Properties specified in hive-default.xml override the properties specified
* in jpox.properties.
*/
@SuppressWarnings("nls")
private static Properties getDataSourceProps(Configuration conf) {
Properties prop = new Properties();
correctAutoStartMechanism(conf);
// First, go through and set all our values for datanucleus and javax.jdo parameters. This
// has to be a separate first step because we don't set the default values in the config object.
for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) {
String confVal = MetastoreConf.getAsString(conf, var);
String varName = var.getVarname();
Object prevVal = prop.setProperty(varName, confVal);
if (MetastoreConf.isPrintable(varName)) {
LOG.debug("Overriding {} value {} from jpox.properties with {}",
varName, prevVal, confVal);
}
}
// Now, we need to look for any values that the user set that MetastoreConf doesn't know about.
// TODO Commenting this out for now, as it breaks because the conf values aren't getting properly
// interpolated in case of variables. See HIVE-17788.
/*
for (Map.Entry<String, String> e : conf) {
if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
// We have to handle this differently depending on whether it is a value known to
// MetastoreConf or not. If it is, we need to get the default value if a value isn't
// provided. If not, we just set whatever the user has set.
Object prevVal = prop.setProperty(e.getKey(), e.getValue());
if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(e.getKey())) {
LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+ " from jpox.properties with " + e.getValue());
}
}
}
*/
// Password may no longer be in the conf, use getPassword()
try {
String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
if (StringUtils.isNotEmpty(passwd)) {
// We can get away with the use of varname here because varname == hiveName for PWD
prop.setProperty(ConfVars.PWD.getVarname(), passwd);
}
} catch (IOException err) {
throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
}
if (LOG.isDebugEnabled()) {
for (Entry<Object, Object> e : prop.entrySet()) {
if (MetastoreConf.isPrintable(e.getKey().toString())) {
LOG.debug("{} = {}", e.getKey(), e.getValue());
}
}
}
return prop;
}
/**
* Update conf to set datanucleus.autoStartMechanismMode=ignored.
* This is necessary to be able to use an older version of Hive against
* an upgraded but compatible metastore schema in the db from a newer
* version of Hive.
* @param conf
*/
private static void correctAutoStartMechanism(Configuration conf) {
final String autoStartKey = "datanucleus.autoStartMechanismMode";
final String autoStartIgnore = "ignored";
String currentAutoStartVal = conf.get(autoStartKey);
if (!autoStartIgnore.equalsIgnoreCase(currentAutoStartVal)) {
LOG.warn("{} is set to unsupported value {} . Setting it to value: {}", autoStartKey,
conf.get(autoStartKey), autoStartIgnore);
}
conf.set(autoStartKey, autoStartIgnore);
}
private static synchronized PersistenceManagerFactory getPMF() {
if (pmf == null) {
Configuration conf = MetastoreConf.newMetastoreConf();
DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf);
if (dsp == null) {
pmf = JDOHelper.getPersistenceManagerFactory(prop);
} else {
try {
DataSource ds = dsp.create(conf);
Map<Object, Object> dsProperties = new HashMap<>();
//Any preexisting datanucleus property should be passed along
dsProperties.putAll(prop);
dsProperties.put("datanucleus.ConnectionFactory", ds);
dsProperties.put("javax.jdo.PersistenceManagerFactoryClass",
"org.datanucleus.api.jdo.JDOPersistenceManagerFactory");
pmf = JDOHelper.getPersistenceManagerFactory(dsProperties);
} catch (SQLException e) {
LOG.warn("Could not create PersistenceManagerFactory using " +
"connection pool properties, will fall back", e);
pmf = JDOHelper.getPersistenceManagerFactory(prop);
}
}
DataStoreCache dsc = pmf.getDataStoreCache();
if (dsc != null) {
String objTypes = MetastoreConf.getVar(conf, ConfVars.CACHE_PINOBJTYPES);
LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"{}\"", objTypes);
if (StringUtils.isNotEmpty(objTypes)) {
String[] typeTokens = objTypes.toLowerCase().split(",");
for (String type : typeTokens) {
type = type.trim();
if (PINCLASSMAP.containsKey(type)) {
dsc.pinAll(true, PINCLASSMAP.get(type));
} else {
LOG.warn("{} is not one of the pinnable object types: {}", type,
StringUtils.join(PINCLASSMAP.keySet(), " "));
}
}
}
} else {
LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes");
}
}
return pmf;
}
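// Illustrative example for the pin-type handling in getPMF() above (assumed
// value, not from the original source): with
//   hive.metastore.cache.pinobjtypes = "Table,Database,Type,FieldSchema,Order"
// the tokens are lower-cased, trimmed, looked up in PINCLASSMAP, and the
// matching model classes are pinned in the DataNucleus level-2 cache.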
@InterfaceAudience.LimitedPrivate({"HCATALOG"})
@InterfaceStability.Evolving
public PersistenceManager getPersistenceManager() {
return getPMF().getPersistenceManager();
}
@Override
public void shutdown() {
LOG.debug("RawStore: {}, with PersistenceManager: {} will be shutdown", this, pm);
if (pm != null) {
pm.close();
pm = null;
}
}
/**
* Opens a new transaction, or joins the one already created. Every call of
* this function must have a corresponding commit or rollback function call.
*
* @return an active transaction
*/
@Override
public boolean openTransaction() {
openTrasactionCalls++;
if (openTrasactionCalls == 1) {
currentTransaction = pm.currentTransaction();
currentTransaction.begin();
transactionStatus = TXN_STATUS.OPEN;
} else {
// openTransactionCalls > 1 means this is an interior transaction
// We should already have a transaction created that is active.
if ((currentTransaction == null) || (!currentTransaction.isActive())){
throw new RuntimeException("openTransaction called in an interior"
+ " transaction scope, but currentTransaction is not active.");
}
}
boolean result = currentTransaction.isActive();
debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
return result;
}
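// Illustrative sketch of the nesting semantics (not part of the original
// source): only the outermost openTransaction() begins the JDO transaction,
// and only the matching outermost commitTransaction() actually commits:
//
//   openTransaction();   // count 1 -> begins the JDO transaction
//   openTransaction();   // count 2 -> interior call, reuses the transaction
//   commitTransaction(); // count 1 -> no actual commit yet
//   commitTransaction(); // count 0 -> the real commit happens here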
/**
* If this is the commit of the first open call, then an actual commit is
* called.
*
* @return Always returns true
*/
@Override
@SuppressWarnings("nls")
public boolean commitTransaction() {
if (TXN_STATUS.ROLLBACK == transactionStatus) {
debugLog("Commit transaction: rollback");
return false;
}
if (openTrasactionCalls <= 0) {
RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+ openTrasactionCalls + ". This probably indicates that there are unbalanced " +
"calls to openTransaction/commitTransaction");
LOG.error("Unbalanced calls to open/commit Transaction", e);
throw e;
}
if (!currentTransaction.isActive()) {
RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+ openTrasactionCalls + ". This probably indicates that there are unbalanced " +
"calls to openTransaction/commitTransaction");
LOG.error("Unbalanced calls to open/commit Transaction", e);
throw e;
}
openTrasactionCalls--;
debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
transactionStatus = TXN_STATUS.COMMITED;
currentTransaction.commit();
}
return true;
}
/**
* @return true if there is an active transaction. If the current transaction
* is either committed or rolled back it returns false
*/
@Override
public boolean isActiveTransaction() {
if (currentTransaction == null) {
return false;
}
return currentTransaction.isActive();
}
/**
* Rolls back the current transaction if it is active
*/
@Override
public void rollbackTransaction() {
if (openTrasactionCalls < 1) {
debugLog("rolling back transaction: no open transactions: " + openTrasactionCalls);
return;
}
debugLog("Rollback transaction, isActive: " + currentTransaction.isActive());
try {
if (currentTransaction.isActive()
&& transactionStatus != TXN_STATUS.ROLLBACK) {
currentTransaction.rollback();
}
} finally {
openTrasactionCalls = 0;
transactionStatus = TXN_STATUS.ROLLBACK;
// remove all detached objects from the cache, since the transaction is
// being rolled back they are no longer relevant, and this prevents them
// from reattaching in future transactions
pm.evictAll();
}
}
@Override
public void createCatalog(Catalog cat) throws MetaException {
LOG.debug("Creating catalog " + cat.getName());
boolean committed = false;
MCatalog mCat = catToMCat(cat);
try {
openTransaction();
pm.makePersistent(mCat);
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
}
}
}
@Override
public void alterCatalog(String catName, Catalog cat)
throws MetaException, InvalidOperationException {
if (!cat.getName().equals(catName)) {
throw new InvalidOperationException("You cannot change a catalog's name");
}
boolean committed = false;
try {
MCatalog mCat = getMCatalog(catName);
if (StringUtils.isNotBlank(cat.getLocationUri())) {
mCat.setLocationUri(cat.getLocationUri());
}
if (StringUtils.isNotBlank(cat.getDescription())) {
mCat.setDescription(cat.getDescription());
}
openTransaction();
pm.makePersistent(mCat);
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
}
}
}
@Override
public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
LOG.debug("Fetching catalog " + catalogName);
MCatalog mCat = getMCatalog(catalogName);
if (mCat == null) {
throw new NoSuchObjectException("No catalog " + catalogName);
}
return mCatToCat(mCat);
}
@Override
public List<String> getCatalogs() throws MetaException {
LOG.debug("Fetching all catalog names");
boolean commited = false;
List<String> catalogs = null;
String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MCatalog";
Query query = null;
openTransaction();
try {
query = pm.newQuery(queryStr);
query.setResult("name");
catalogs = new ArrayList<>((Collection<String>) query.execute());
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
Collections.sort(catalogs);
return catalogs;
}
@Override
public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
LOG.debug("Dropping catalog " + catalogName);
boolean committed = false;
try {
openTransaction();
MCatalog mCat = getMCatalog(catalogName);
if (mCat == null) {
throw new NoSuchObjectException("No catalog " + catalogName);
}
pm.retrieve(mCat);
pm.deletePersistent(mCat);
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
}
}
}
private MCatalog getMCatalog(String catalogName) throws MetaException {
boolean committed = false;
Query query = null;
try {
openTransaction();
catalogName = normalizeIdentifier(catalogName);
query = pm.newQuery(MCatalog.class, "name == catname");
query.declareParameters("java.lang.String catname");
query.setUnique(true);
MCatalog mCat = (MCatalog)query.execute(catalogName);
pm.retrieve(mCat);
committed = commitTransaction();
return mCat;
} finally {
rollbackAndCleanup(committed, query);
}
}
private MCatalog catToMCat(Catalog cat) {
MCatalog mCat = new MCatalog();
mCat.setName(normalizeIdentifier(cat.getName()));
if (cat.isSetDescription()) {
mCat.setDescription(cat.getDescription());
}
mCat.setLocationUri(cat.getLocationUri());
return mCat;
}
private Catalog mCatToCat(MCatalog mCat) {
Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri());
if (mCat.getDescription() != null) {
cat.setDescription(mCat.getDescription());
}
return cat;
}
@Override
public void createDatabase(Database db) throws InvalidObjectException, MetaException {
boolean commited = false;
MDatabase mdb = new MDatabase();
assert db.getCatalogName() != null;
mdb.setCatalogName(normalizeIdentifier(db.getCatalogName()));
assert mdb.getCatalogName() != null;
mdb.setName(db.getName().toLowerCase());
mdb.setLocationUri(db.getLocationUri());
mdb.setDescription(db.getDescription());
mdb.setParameters(db.getParameters());
mdb.setOwnerName(db.getOwnerName());
PrincipalType ownerType = db.getOwnerType();
mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() : ownerType.name()));
try {
openTransaction();
pm.makePersistent(mdb);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
}
@SuppressWarnings("nls")
private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectException {
MDatabase mdb = null;
boolean commited = false;
Query query = null;
try {
openTransaction();
name = normalizeIdentifier(name);
catName = normalizeIdentifier(catName);
query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname");
query.declareParameters("java.lang.String dbname, java.lang.String catname");
query.setUnique(true);
mdb = (MDatabase) query.execute(name, catName);
pm.retrieve(mdb);
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
if (mdb == null) {
throw new NoSuchObjectException("There is no database " + catName + "." + name);
}
return mdb;
}
@Override
public Database getDatabase(String catalogName, String name) throws NoSuchObjectException {
MetaException ex = null;
Database db = null;
try {
db = getDatabaseInternal(catalogName, name);
} catch (MetaException e) {
// Signature restriction to NSOE, and NSOE being a flat exception prevents us from
// setting the cause of the NSOE as the MetaException. We should not lose the info
// we got here, but it's very likely that the MetaException is irrelevant and is
// actually an NSOE message, so we should log it and throw an NSOE with the msg.
ex = e;
}
if (db == null) {
LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException",
catalogName, name, ex);
throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
}
return db;
}
public Database getDatabaseInternal(String catalogName, String name)
throws MetaException, NoSuchObjectException {
return new GetDbHelper(catalogName, name, true, true) {
@Override
protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
return directSql.getDatabase(catalogName, dbName);
}
@Override
protected Database getJdoResult(GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
return getJDODatabase(catalogName, dbName);
}
}.run(false);
}
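// The GetDbHelper above follows the fallback pattern used throughout this
// class: try the direct-SQL path first (getSqlResult) and, if direct SQL is
// disabled or fails, fall back to the JDO path (getJdoResult).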
public Database getJDODatabase(String catName, String name) throws NoSuchObjectException {
MDatabase mdb = null;
boolean commited = false;
try {
openTransaction();
mdb = getMDatabase(catName, name);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
Database db = new Database();
db.setName(mdb.getName());
db.setDescription(mdb.getDescription());
db.setLocationUri(mdb.getLocationUri());
db.setParameters(convertMap(mdb.getParameters()));
db.setOwnerName(mdb.getOwnerName());
String type = StringUtils.defaultIfBlank(mdb.getOwnerType(), null);
PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type);
db.setOwnerType(principalType);
db.setCatalogName(catName);
return db;
}
/**
* Alter the database object in the metastore. Currently the parameters, owner,
* description, and location of the database can be changed.
* @param catName the catalog name
* @param dbName the database name
* @param db the Hive Database object
* @throws MetaException
* @throws NoSuchObjectException
*/
@Override
public boolean alterDatabase(String catName, String dbName, Database db)
throws MetaException, NoSuchObjectException {
MDatabase mdb = null;
boolean committed = false;
try {
mdb = getMDatabase(catName, dbName);
mdb.setParameters(db.getParameters());
mdb.setOwnerName(db.getOwnerName());
if (db.getOwnerType() != null) {
mdb.setOwnerType(db.getOwnerType().name());
}
if (StringUtils.isNotBlank(db.getDescription())) {
mdb.setDescription(db.getDescription());
}
if (StringUtils.isNotBlank(db.getLocationUri())) {
mdb.setLocationUri(db.getLocationUri());
}
openTransaction();
pm.makePersistent(mdb);
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
return false;
}
}
return true;
}
@Override
public boolean dropDatabase(String catName, String dbname)
throws NoSuchObjectException, MetaException {
boolean success = false;
LOG.info("Dropping database {}.{} along with all tables", catName, dbname);
dbname = normalizeIdentifier(dbname);
catName = normalizeIdentifier(catName);
QueryWrapper queryWrapper = new QueryWrapper();
try {
openTransaction();
// then drop the database
MDatabase db = getMDatabase(catName, dbname);
pm.retrieve(db);
if (db != null) {
List<MDBPrivilege> dbGrants = this.listDatabaseGrants(catName, dbname, queryWrapper);
if (CollectionUtils.isNotEmpty(dbGrants)) {
pm.deletePersistentAll(dbGrants);
}
pm.deletePersistent(db);
}
success = commitTransaction();
} finally {
rollbackAndCleanup(success, queryWrapper);
}
return success;
}
@Override
public List<String> getDatabases(String catName, String pattern) throws MetaException {
if (pattern == null || pattern.equals("*")) {
return getAllDatabases(catName);
}
boolean commited = false;
List<String> databases = null;
Query query = null;
try {
openTransaction();
// Take the pattern and split it on the | to get all the composing
// patterns
String[] subpatterns = pattern.trim().split("\\|");
StringBuilder filterBuilder = new StringBuilder();
List<String> parameterVals = new ArrayList<>(subpatterns.length);
appendSimpleCondition(filterBuilder, "catalogName", new String[] {catName}, parameterVals);
appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals);
query = pm.newQuery(MDatabase.class, filterBuilder.toString());
query.setResult("name");
query.setOrdering("name ascending");
Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
databases = new ArrayList<>(names);
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return databases;
}
@Override
public List<String> getAllDatabases(String catName) throws MetaException {
boolean commited = false;
List<String> databases = null;
Query query = null;
catName = normalizeIdentifier(catName);
openTransaction();
try {
query = pm.newQuery("select name from org.apache.hadoop.hive.metastore.model.MDatabase " +
"where catalogName == catname");
query.declareParameters("java.lang.String catname");
query.setResult("name");
databases = new ArrayList<>((Collection<String>) query.execute(catName));
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
Collections.sort(databases);
return databases;
}
private MType getMType(Type type) {
List<MFieldSchema> fields = new ArrayList<>();
if (type.getFields() != null) {
for (FieldSchema field : type.getFields()) {
fields.add(new MFieldSchema(field.getName(), field.getType(), field
.getComment()));
}
}
return new MType(type.getName(), type.getType1(), type.getType2(), fields);
}
private Type getType(MType mtype) {
List<FieldSchema> fields = new ArrayList<>();
if (mtype.getFields() != null) {
for (MFieldSchema field : mtype.getFields()) {
fields.add(new FieldSchema(field.getName(), field.getType(), field
.getComment()));
}
}
Type ret = new Type();
ret.setName(mtype.getName());
ret.setType1(mtype.getType1());
ret.setType2(mtype.getType2());
ret.setFields(fields);
return ret;
}
@Override
public boolean createType(Type type) {
boolean success = false;
MType mtype = getMType(type);
boolean commited = false;
try {
openTransaction();
pm.makePersistent(mtype);
commited = commitTransaction();
success = true;
} finally {
if (!commited) {
rollbackTransaction();
}
}
return success;
}
@Override
public Type getType(String typeName) {
Type type = null;
boolean commited = false;
Query query = null;
try {
openTransaction();
query = pm.newQuery(MType.class, "name == typeName");
query.declareParameters("java.lang.String typeName");
query.setUnique(true);
MType mtype = (MType) query.execute(typeName.trim());
pm.retrieve(mtype);
if (mtype != null) {
type = getType(mtype);
}
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return type;
}
@Override
public boolean dropType(String typeName) {
boolean success = false;
Query query = null;
try {
openTransaction();
query = pm.newQuery(MType.class, "name == typeName");
query.declareParameters("java.lang.String typeName");
query.setUnique(true);
MType type = (MType) query.execute(typeName.trim());
pm.retrieve(type);
if (type != null) {
pm.deletePersistent(type);
}
success = commitTransaction();
} catch (JDOObjectNotFoundException e) {
success = commitTransaction();
LOG.debug("type not found {}", typeName, e);
} finally {
rollbackAndCleanup(success, query);
}
return success;
}
@Override
public List<String> createTableWithConstraints(Table tbl,
List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints)
throws InvalidObjectException, MetaException {
boolean success = false;
try {
openTransaction();
createTable(tbl);
// Add constraints.
// We need not do a deep retrieval of the Table Column Descriptor while persisting the
// constraints since this transaction involving create table is not yet committed.
List<String> constraintNames = new ArrayList<>();
if (foreignKeys != null) {
constraintNames.addAll(addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints));
}
if (primaryKeys != null) {
constraintNames.addAll(addPrimaryKeys(primaryKeys, false));
}
if (uniqueConstraints != null) {
constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false));
}
if (notNullConstraints != null) {
constraintNames.addAll(addNotNullConstraints(notNullConstraints, false));
}
if (defaultConstraints != null) {
constraintNames.addAll(addDefaultConstraints(defaultConstraints, false));
}
if (checkConstraints != null) {
constraintNames.addAll(addCheckConstraints(checkConstraints, false));
}
success = commitTransaction();
return constraintNames;
} finally {
if (!success) {
rollbackTransaction();
}
}
}
@Override
public void createTable(Table tbl) throws InvalidObjectException, MetaException {
boolean commited = false;
try {
openTransaction();
MTable mtbl = convertToMTable(tbl);
pm.makePersistent(mtbl);
if (tbl.getCreationMetadata() != null) {
MCreationMetadata mcm = convertToMCreationMetadata(tbl.getCreationMetadata());
pm.makePersistent(mcm);
}
PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges();
List<Object> toPersistPrivObjs = new ArrayList<>();
if (principalPrivs != null) {
int now = (int)(System.currentTimeMillis()/1000);
Map<String, List<PrivilegeGrantInfo>> userPrivs = principalPrivs.getUserPrivileges();
putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER);
Map<String, List<PrivilegeGrantInfo>> groupPrivs = principalPrivs.getGroupPrivileges();
putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP);
Map<String, List<PrivilegeGrantInfo>> rolePrivs = principalPrivs.getRolePrivileges();
putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE);
}
pm.makePersistentAll(toPersistPrivObjs);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
} else {
if (MetaStoreUtils.isMaterializedViewTable(tbl)) {
// Add to the invalidation cache
MaterializationsInvalidationCache.get().createMaterializedView(
tbl.getDbName(), tbl.getTableName(), tbl.getCreationMetadata().getTablesUsed(),
tbl.getCreationMetadata().getValidTxnList());
}
}
}
}
/**
* Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all of
* them to the toPersistPrivObjs. These privilege objects will be persisted as
* part of createTable.
*
* @param mtbl
* @param toPersistPrivObjs
* @param now
* @param privMap
* @param type
*/
private void putPersistentPrivObjects(MTable mtbl, List<Object> toPersistPrivObjs,
int now, Map<String, List<PrivilegeGrantInfo>> privMap, PrincipalType type) {
if (privMap != null) {
for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : privMap
.entrySet()) {
String principalName = entry.getKey();
List<PrivilegeGrantInfo> privs = entry.getValue();
for (int i = 0; i < privs.size(); i++) {
PrivilegeGrantInfo priv = privs.get(i);
if (priv == null) {
continue;
}
MTablePrivilege mTblSec = new MTablePrivilege(
principalName, type.toString(), mtbl, priv.getPrivilege(),
now, priv.getGrantor(), priv.getGrantorType().toString(), priv
.isGrantOption());
toPersistPrivObjs.add(mTblSec);
}
}
}
}
@Override
public boolean dropTable(String catName, String dbName, String tableName)
throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
boolean materializedView = false;
boolean success = false;
try {
openTransaction();
MTable tbl = getMTable(catName, dbName, tableName);
pm.retrieve(tbl);
if (tbl != null) {
materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType());
// first remove all the grants
List<MTablePrivilege> tabGrants = listAllTableGrants(catName, dbName, tableName);
if (CollectionUtils.isNotEmpty(tabGrants)) {
pm.deletePersistentAll(tabGrants);
}
List<MTableColumnPrivilege> tblColGrants = listTableAllColumnGrants(catName, dbName,
tableName);
if (CollectionUtils.isNotEmpty(tblColGrants)) {
pm.deletePersistentAll(tblColGrants);
}
List<MPartitionPrivilege> partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName);
if (CollectionUtils.isNotEmpty(partGrants)) {
pm.deletePersistentAll(partGrants);
}
List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(catName, dbName,
tableName);
if (CollectionUtils.isNotEmpty(partColGrants)) {
pm.deletePersistentAll(partColGrants);
}
// delete column statistics if present
try {
deleteTableColumnStatistics(catName, dbName, tableName, null);
} catch (NoSuchObjectException e) {
LOG.info("Found no table level column statistics associated with {} to delete",
getCatalogQualifiedTableName(catName, dbName, tableName));
}
List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
catName, dbName, tableName, null);
if (CollectionUtils.isNotEmpty(tabConstraints)) {
pm.deletePersistentAll(tabConstraints);
}
preDropStorageDescriptor(tbl.getSd());
if (materializedView) {
dropCreationMetadata(tbl.getDatabase().getCatalogName(),
tbl.getDatabase().getName(), tbl.getTableName());
}
// then remove the table
pm.deletePersistentAll(tbl);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
} else {
if (materializedView) {
MaterializationsInvalidationCache.get().dropMaterializedView(dbName, tableName);
}
}
}
return success;
}
private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException,
NoSuchObjectException, InvalidObjectException, InvalidInputException {
boolean success = false;
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
try {
openTransaction();
MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName);
pm.retrieve(mcm);
if (mcm != null) {
pm.deletePersistentAll(mcm);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName(
String catName, String dbName, String tableName, String constraintname) {
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
constraintname = constraintname != null ? normalizeIdentifier(constraintname) : null;
List<MConstraint> mConstraints = null;
List<String> constraintNames = new ArrayList<>();
Query query = null;
try {
query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint where "
+ "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " +
"parentTable.database.catalogName == pcatname) || "
+ "(childTable != null && childTable.tableName == ctblname &&" +
"childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " +
(constraintname != null ? " && constraintName == constraintname" : ""));
query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
+ "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," +
"java.lang.String ccatname" +
(constraintname != null ? ", java.lang.String constraintname" : ""));
Collection<?> constraintNamesColl =
constraintname != null ?
((Collection<?>) query.
executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)) :
((Collection<?>) query.
executeWithArray(tableName, dbName, catName, tableName, dbName, catName));
for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
String currName = (String) i.next();
constraintNames.add(currName);
}
query = pm.newQuery(MConstraint.class);
query.setFilter("param.contains(constraintName)");
query.declareParameters("java.util.Collection param");
Collection<?> constraints = (Collection<?>) query.execute(constraintNames);
mConstraints = new ArrayList<>();
for (Iterator<?> i = constraints.iterator(); i.hasNext();) {
MConstraint currConstraint = (MConstraint) i.next();
mConstraints.add(currConstraint);
}
} finally {
if (query != null) {
query.closeAll();
}
}
return mConstraints;
}
@Override
public Table getTable(String catName, String dbName, String tableName) throws MetaException {
boolean commited = false;
Table tbl = null;
try {
openTransaction();
tbl = convertToTable(getMTable(catName, dbName, tableName));
// Retrieve creation metadata if needed
if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
tbl.setCreationMetadata(
convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
}
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return tbl;
}
@Override
public List<String> getTables(String catName, String dbName, String pattern)
throws MetaException {
return getTables(catName, dbName, pattern, null);
}
@Override
public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
throws MetaException {
try {
// We only support pattern matching via JDO since pattern matching in Java
// may differ from the matching used by the metastore backends
return getTablesInternal(catName, dbName, pattern, tableType,
(pattern == null || pattern.equals(".*")), true);
} catch (NoSuchObjectException e) {
throw new MetaException(ExceptionUtils.getStackTrace(e));
}
}
protected List<String> getTablesInternal(String catName, String dbName, String pattern,
TableType tableType, boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
final String db_name = normalizeIdentifier(dbName);
final String cat_name = normalizeIdentifier(catName);
return new GetListHelper<String>(cat_name, dbName, null, allowSql, allowJdo) {
@Override
protected List<String> getSqlResult(GetHelper<List<String>> ctx)
throws MetaException {
return directSql.getTables(cat_name, db_name, tableType);
}
@Override
protected List<String> getJdoResult(GetHelper<List<String>> ctx)
throws MetaException, NoSuchObjectException {
return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType);
}
}.run(false);
}
private List<String> getTablesInternalViaJdo(String catName, String dbName, String pattern,
TableType tableType) throws MetaException {
boolean commited = false;
Query query = null;
List<String> tbls = null;
try {
openTransaction();
dbName = normalizeIdentifier(dbName);
// Take the pattern and split it on the | to get all the composing
// patterns
List<String> parameterVals = new ArrayList<>();
StringBuilder filterBuilder = new StringBuilder();
//adds database.name == dbName to the filter
appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals);
appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
if(pattern != null) {
appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals);
}
if(tableType != null) {
appendPatternCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals);
}
query = pm.newQuery(MTable.class, filterBuilder.toString());
query.setResult("tableName");
query.setOrdering("tableName ascending");
Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
tbls = new ArrayList<>(names);
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return tbls;
}
@Override
public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
throws MetaException, NoSuchObjectException {
final String db_name = normalizeIdentifier(dbName);
catName = normalizeIdentifier(catName);
boolean commited = false;
Query<?> query = null;
List<String> tbls = null;
try {
openTransaction();
dbName = normalizeIdentifier(dbName);
query = pm.newQuery(MTable.class,
"database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re");
query.declareParameters(
"java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re");
query.setResult("tableName");
Collection<String> names = (Collection<String>) query.executeWithArray(
db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true);
tbls = new ArrayList<>(names);
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return tbls;
}
@Override
public int getDatabaseCount() throws MetaException {
return getObjectCount("name", MDatabase.class.getName());
}
@Override
public int getPartitionCount() throws MetaException {
return getObjectCount("partitionName", MPartition.class.getName());
}
@Override
public int getTableCount() throws MetaException {
return getObjectCount("tableName", MTable.class.getName());
}
private int getObjectCount(String fieldName, String objName) {
Long result = 0L;
boolean commited = false;
Query query = null;
try {
openTransaction();
String queryStr =
"select count(" + fieldName + ") from " + objName;
query = pm.newQuery(queryStr);
result = (Long) query.execute();
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return result.intValue();
}
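// Example (illustrative, added for documentation): getObjectCount("tableName",
// MTable.class.getName()) issues the single-value JDOQL query
//   select count(tableName) from org.apache.hadoop.hive.metastore.model.MTable
// and narrows the returned Long to an int, which implicitly assumes object counts
// stay within int range.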
@Override
public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
List<String> tableTypes) throws MetaException {
boolean commited = false;
Query query = null;
List<TableMeta> metas = new ArrayList<>();
try {
openTransaction();
// Take the pattern and split it on the | to get all the composing
// patterns
StringBuilder filterBuilder = new StringBuilder();
List<String> parameterVals = new ArrayList<>();
appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
if (dbNames != null && !dbNames.equals("*")) {
appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals);
}
if (tableNames != null && !tableNames.equals("*")) {
appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals);
}
if (tableTypes != null && !tableTypes.isEmpty()) {
appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals);
}
if (LOG.isDebugEnabled()) {
LOG.debug("getTableMeta with filter " + filterBuilder.toString() + " params: " +
StringUtils.join(parameterVals, ", "));
}
query = pm.newQuery(MTable.class, filterBuilder.toString());
Collection<MTable> tables = (Collection<MTable>) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
for (MTable table : tables) {
TableMeta metaData = new TableMeta(
table.getDatabase().getName(), table.getTableName(), table.getTableType());
metaData.setComments(table.getParameters().get("comment"));
metas.add(metaData);
}
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return metas;
}
private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String fieldName,
String[] elements, List<String> parameterVals) {
return appendCondition(filterBuilder, fieldName, elements, true, parameterVals);
}
private StringBuilder appendPatternCondition(StringBuilder builder,
String fieldName, String elements, List<String> parameters) {
elements = normalizeIdentifier(elements);
return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters);
}
private StringBuilder appendSimpleCondition(StringBuilder builder,
String fieldName, String[] elements, List<String> parameters) {
return appendCondition(builder, fieldName, elements, false, parameters);
}
private StringBuilder appendCondition(StringBuilder builder,
String fieldName, String[] elements, boolean pattern, List<String> parameters) {
if (builder.length() > 0) {
builder.append(" && ");
}
builder.append(" (");
int length = builder.length();
for (String element : elements) {
if (pattern) {
element = "(?i)" + element.replaceAll("\\*", ".*");
}
parameters.add(element);
if (builder.length() > length) {
builder.append(" || ");
}
builder.append(fieldName);
if (pattern) {
builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")");
} else {
builder.append(" == ").append(JDO_PARAM).append(parameters.size());
}
}
builder.append(" )");
return builder;
}
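// Example (illustrative, not from the upstream source; assumes JDO_PARAM expands to
// a JDO parameter prefix such as ":param"): a call like
//   appendPatternCondition(builder, "tableName", "fact_*|dim_*", params)
// splits the Hive pattern on '|', rewrites '*' to the regex ".*", prefixes "(?i)"
// for case-insensitive matching, and appends a fragment of the form
//   (tableName.matches(:param1) || tableName.matches(:param2))
// with parameter values "(?i)fact_.*" and "(?i)dim_.*". appendSimpleCondition
// produces equality tests ("fieldName == :paramN") instead of regex matches.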
@Override
public List<String> getAllTables(String catName, String dbName) throws MetaException {
return getTables(catName, dbName, ".*");
}
class AttachedMTableInfo {
MTable mtbl;
MColumnDescriptor mcd;
public AttachedMTableInfo() {}
public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) {
this.mtbl = mtbl;
this.mcd = mcd;
}
}
private AttachedMTableInfo getMTable(String catName, String db, String table,
boolean retrieveCD) {
AttachedMTableInfo nmtbl = new AttachedMTableInfo();
MTable mtbl = null;
boolean commited = false;
Query query = null;
try {
openTransaction();
catName = normalizeIdentifier(catName);
db = normalizeIdentifier(db);
table = normalizeIdentifier(table);
query = pm.newQuery(MTable.class,
"tableName == table && database.name == db && database.catalogName == catname");
query.declareParameters(
"java.lang.String table, java.lang.String db, java.lang.String catname");
query.setUnique(true);
LOG.debug("Executing getMTable for " +
getCatalogQualifiedTableName(catName, db, table));
mtbl = (MTable) query.execute(table, db, catName);
pm.retrieve(mtbl);
// Retrieving CD can be expensive and unnecessary, so do it only when required.
if (mtbl != null && retrieveCD) {
pm.retrieve(mtbl.getSd());
pm.retrieveAll(mtbl.getSd().getCD());
nmtbl.mcd = mtbl.getSd().getCD();
}
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
nmtbl.mtbl = mtbl;
return nmtbl;
}
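// Note (illustrative usage, added for clarity): callers that need the column
// descriptor for CD-reuse checks pass retrieveCD=true and read both fields:
//   AttachedMTableInfo info = getMTable(catName, dbName, tblName, true);
//   MTable mtbl = info.mtbl;
//   MColumnDescriptor cd = info.mcd;
// Plain lookups use the getMTable(catName, db, table) overload below, which skips
// the extra pm.retrieve calls on the SD and CD.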
private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) {
boolean commited = false;
MCreationMetadata mcm = null;
Query query = null;
try {
openTransaction();
query = pm.newQuery(
MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat");
query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat");
query.setUnique(true);
mcm = (MCreationMetadata) query.execute(tblName, dbName, catName);
pm.retrieve(mcm);
commited = commitTransaction();
} finally {
rollbackAndCleanup(commited, query);
}
return mcm;
}
private MTable getMTable(String catName, String db, String table) {
AttachedMTableInfo nmtbl = getMTable(catName, db, table, false);
return nmtbl.mtbl;
}
@Override
public List<Table> getTableObjectsByName(String catName, String db, List<String> tbl_names)
throws MetaException, UnknownDBException {
List tables = new ArrayList<>();
boolean committed = false;
Query dbExistsQuery = null;
Query query = null;
try {
openTransaction();
db = normalizeIdentifier(db);
catName = normalizeIdentifier(catName);
List<String> lowered_tbl_names = new ArrayList<>(tbl_names.size());
for (String t : tbl_names) {
lowered_tbl_names.add(normalizeIdentifier(t));
}
query = pm.newQuery(MTable.class);
query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)");
query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names");
Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names);
if (mtables == null || mtables.isEmpty()) {
// Need to differentiate between an unmatched pattern and a non-existent database
dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat");
dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat");
dbExistsQuery.setUnique(true);
dbExistsQuery.setResult("name");
String dbNameIfExists = (String) dbExistsQuery.execute(db, catName);
if (com.facebook.presto.hive.$internal.org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) {
throw new UnknownDBException("Could not find database " +
getCatalogQualifiedDbName(catName, db));
}
} else {
for (Iterator iter = mtables.iterator(); iter.hasNext(); ) {
Table tbl = convertToTable((MTable) iter.next());
// Retrieve creation metadata if needed
if (TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
tbl.setCreationMetadata(
convertToCreationMetadata(
getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())));
}
tables.add(tbl);
}
}
committed = commitTransaction();
} finally {
rollbackAndCleanup(committed, query);
if (dbExistsQuery != null) {
dbExistsQuery.closeAll();
}
}
return tables;
}
/** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */
private <T> List<T> convertList(List<T> dnList) {
return (dnList == null) ? null : Lists.newArrayList(dnList);
}
/** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */
private Map<String, String> convertMap(Map<String, String> dnMap) {
return MetaStoreUtils.trimMapNulls(dnMap,
MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS));
}
private Table convertToTable(MTable mtbl) throws MetaException {
if (mtbl == null) {
return null;
}
String tableType = mtbl.getTableType();
if (tableType == null) {
// for backwards compatibility with old metastore persistence
if (mtbl.getViewOriginalText() != null) {
tableType = TableType.VIRTUAL_VIEW.toString();
} else if (Boolean.parseBoolean(mtbl.getParameters().get("EXTERNAL"))) {
tableType = TableType.EXTERNAL_TABLE.toString();
} else {
tableType = TableType.MANAGED_TABLE.toString();
}
}
final Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
.getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
.getRetention(), convertToStorageDescriptor(mtbl.getSd()),
convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
if (Strings.isNullOrEmpty(mtbl.getOwnerType())) {
// Before the ownerType exists in an old Hive schema, USER was the default type for owner.
// Let's set the default to USER to keep backward compatibility.
t.setOwnerType(PrincipalType.USER);
} else {
t.setOwnerType(PrincipalType.valueOf(mtbl.getOwnerType()));
}
t.setRewriteEnabled(mtbl.isRewriteEnabled());
t.setCatName(mtbl.getDatabase().getCatalogName());
return t;
}
private MTable convertToMTable(Table tbl) throws InvalidObjectException,
MetaException {
if (tbl == null) {
return null;
}
MDatabase mdb = null;
String catName = tbl.isSetCatName() ? tbl.getCatName() : getDefaultCatalog(conf);
try {
mdb = getMDatabase(catName, tbl.getDbName());
} catch (NoSuchObjectException e) {
LOG.error("Could not convert to MTable", e);
throw new InvalidObjectException("Database " +
getCatalogQualifiedDbName(catName, tbl.getDbName()) + " doesn't exist.");
}
// If the table has property EXTERNAL set, update table type
// accordingly
String tableType = tbl.getTableType();
boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
if (isExternal) {
tableType = TableType.EXTERNAL_TABLE.toString();
}
}
if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
if (!isExternal) {
tableType = TableType.MANAGED_TABLE.toString();
}
}
PrincipalType ownerPrincipalType = tbl.getOwnerType();
String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name();
// A new table is always created with a new column descriptor
return new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
.getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
tableType);
}
private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
List<MFieldSchema> mkeys = null;
if (keys != null) {
mkeys = new ArrayList<>(keys.size());
for (FieldSchema part : keys) {
mkeys.add(new MFieldSchema(part.getName().toLowerCase(),
part.getType(), part.getComment()));
}
}
return mkeys;
}
private List<FieldSchema> convertToFieldSchemas(List<MFieldSchema> mkeys) {
List<FieldSchema> keys = null;
if (mkeys != null) {
keys = new ArrayList<>(mkeys.size());
for (MFieldSchema part : mkeys) {
keys.add(new FieldSchema(part.getName(), part.getType(), part
.getComment()));
}
}
return keys;
}
private List<MOrder> convertToMOrders(List<Order> keys) {
List<MOrder> mkeys = null;
if (keys != null) {
mkeys = new ArrayList<>(keys.size());
for (Order part : keys) {
mkeys.add(new MOrder(normalizeIdentifier(part.getCol()), part.getOrder()));
}
}
return mkeys;
}
private List<Order> convertToOrders(List<MOrder> mkeys) {
List<Order> keys = null;
if (mkeys != null) {
keys = new ArrayList<>(mkeys.size());
for (MOrder part : mkeys) {
keys.add(new Order(part.getCol(), part.getOrder()));
}
}
return keys;
}
private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
if (ms == null) {
throw new MetaException("Invalid SerDeInfo object");
}
SerDeInfo serde =
new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
if (ms.getDescription() != null) {
serde.setDescription(ms.getDescription());
}
if (ms.getSerializerClass() != null) {
serde.setSerializerClass(ms.getSerializerClass());
}
if (ms.getDeserializerClass() != null) {
serde.setDeserializerClass(ms.getDeserializerClass());
}
if (ms.getSerdeType() > 0) {
serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
}
return serde;
}
private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
if (ms == null) {
throw new MetaException("Invalid SerDeInfo object");
}
return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms.getParameters(),
ms.getDescription(), ms.getSerializerClass(), ms.getDeserializerClass(),
ms.getSerdeType() == null ? 0 : ms.getSerdeType().getValue());
}
/**
* Given a list of model field schemas, create a new model column descriptor.
* @param cols the columns the column descriptor contains
* @return a new column descriptor db-backed object
*/
private MColumnDescriptor createNewMColumnDescriptor(List<MFieldSchema> cols) {
if (cols == null) {
return null;
}
return new MColumnDescriptor(cols);
}
// MSD and SD should be the same objects. Not sure how to make them the same right now.
// MSerDeInfo & SerDeInfo should be the same as well.
private StorageDescriptor convertToStorageDescriptor(
MStorageDescriptor msd,
boolean noFS) throws MetaException {
if (msd == null) {
return null;
}
List<MFieldSchema> mFieldSchemas = msd.getCD() == null ? null : msd.getCD().getCols();
StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas),
msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
.isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd
.getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd
.getSortCols()), convertMap(msd.getParameters()));
SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
convertToSkewedValues(msd.getSkewedColValues()),
covertToSkewedMap(msd.getSkewedColValueLocationMaps()));
sd.setSkewedInfo(skewedInfo);
sd.setStoredAsSubDirectories(msd.isStoredAsSubDirectories());
return sd;
}
private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd)
throws MetaException {
return convertToStorageDescriptor(msd, false);
}
/**
* Convert a list of MStringList to a list of lists of strings.
*
* @param mLists the model string lists to convert
* @return the corresponding lists of strings, or null if mLists is null
*/
private List<List<String>> convertToSkewedValues(List<MStringList> mLists) {
List<List<String>> lists = null;
if (mLists != null) {
lists = new ArrayList<>(mLists.size());
for (MStringList element : mLists) {
lists.add(new ArrayList<>(element.getInternalList()));
}
}
return lists;
}
private List<MStringList> convertToMStringLists(List<List<String>> mLists) {
List<MStringList> lists = null;
if (null != mLists) {
lists = new ArrayList<>();
for (List<String> mList : mLists) {
lists.add(new MStringList(mList));
}
}
return lists;
}
/**
* Convert a map keyed by MStringList to a map keyed by lists of strings.
* @param mMap the model map to convert
* @return the converted map, or null if mMap is null
*/
private Map<List<String>, String> covertToSkewedMap(Map<MStringList, String> mMap) {
Map<List<String>, String> map = null;
if (mMap != null) {
map = new HashMap<>(mMap.size());
Set<MStringList> keys = mMap.keySet();
for (MStringList key : keys) {
map.put(new ArrayList<>(key.getInternalList()), mMap.get(key));
}
}
return map;
}
/**
* Convert a map keyed by lists of strings to a map keyed by MStringList.
* @param mMap the map to convert
* @return the converted map, or null if mMap is null
*/
private Map<MStringList, String> covertToMapMStringList(Map<List<String>, String> mMap) {
Map<MStringList, String> map = null;
if (mMap != null) {
map = new HashMap<>(mMap.size());
Set<List<String>> keys = mMap.keySet();
for (List<String> key : keys) {
map.put(new MStringList(key), mMap.get(key));
}
}
return map;
}
/**
* Converts a storage descriptor to a db-backed storage descriptor. Creates a
* new db-backed column descriptor object for this SD.
* @param sd the storage descriptor to wrap in a db-backed object
* @return the storage descriptor db-backed object
* @throws MetaException
*/
private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd)
throws MetaException {
if (sd == null) {
return null;
}
MColumnDescriptor mcd = createNewMColumnDescriptor(convertToMFieldSchemas(sd.getCols()));
return convertToMStorageDescriptor(sd, mcd);
}
/**
* Converts a storage descriptor to a db-backed storage descriptor. It points the
* storage descriptor's column descriptor to the one passed as an argument,
* so it does not create a new mcolumn descriptor object.
* @param sd the storage descriptor to wrap in a db-backed object
* @param mcd the db-backed column descriptor
* @return the db-backed storage descriptor object
* @throws MetaException
*/
private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd,
MColumnDescriptor mcd) throws MetaException {
if (sd == null) {
return null;
}
return new MStorageDescriptor(mcd, sd
.getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd
.isCompressed(), sd.getNumBuckets(), convertToMSerDeInfo(sd
.getSerdeInfo()), sd.getBucketCols(),
convertToMOrders(sd.getSortCols()), sd.getParameters(),
(null == sd.getSkewedInfo()) ? null
: sd.getSkewedInfo().getSkewedColNames(),
convertToMStringLists((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo()
.getSkewedColValues()),
covertToMapMStringList((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo()
.getSkewedColValueLocationMaps()), sd.isStoredAsSubDirectories());
}
private MCreationMetadata convertToMCreationMetadata(
CreationMetadata m) throws MetaException {
if (m == null) {
return null;
}
Set<MTable> tablesUsed = new HashSet<>();
for (String fullyQualifiedName : m.getTablesUsed()) {
String[] names = fullyQualifiedName.split("\\.");
tablesUsed.add(getMTable(m.getCatName(), names[0], names[1], false).mtbl);
}
return new MCreationMetadata(m.getCatName(), m.getDbName(), m.getTblName(),
tablesUsed, m.getValidTxnList());
}
private CreationMetadata convertToCreationMetadata(
MCreationMetadata s) throws MetaException {
if (s == null) {
return null;
}
Set<String> tablesUsed = new HashSet<>();
for (MTable mtbl : s.getTables()) {
tablesUsed.add(
Warehouse.getQualifiedName(
mtbl.getDatabase().getName(), mtbl.getTableName()));
}
CreationMetadata r = new CreationMetadata(s.getCatalogName(),
s.getDbName(), s.getTblName(), tablesUsed);
if (s.getTxnList() != null) {
r.setValidTxnList(s.getTxnList());
}
return r;
}
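// Note (added for clarity): the two converters above are deliberately asymmetric.
// convertToMCreationMetadata resolves each "db.table" string from getTablesUsed()
// into a live MTable reference by splitting on ".", while convertToCreationMetadata
// flattens the references back into qualified names via Warehouse.getQualifiedName,
// so a round trip preserves the same "db.table" form.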
@Override
public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
throws InvalidObjectException, MetaException {
boolean success = false;
openTransaction();
try {
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
MTable table = this.getMTable(catName, dbName, tblName);
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(catName, dbName, tblName);
tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName);
}
List<Object> toPersist = new ArrayList<>();
for (Partition part : parts) {
if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + part);
}
MPartition mpart = convertToMPart(part, true);
toPersist.add(mpart);
int now = (int)(System.currentTimeMillis()/1000);
if (tabGrants != null) {
for (MTablePrivilege tab: tabGrants) {
toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(),
tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
}
}
if (tabColumnGrants != null) {
for (MTableColumnPrivilege col : tabColumnGrants) {
toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(),
col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
}
}
}
if (CollectionUtils.isNotEmpty(toPersist)) {
pm.makePersistentAll(toPersist);
pm.flush();
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
private boolean isValidPartition(
Partition part, boolean ifNotExists) throws MetaException {
MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
partitionValidationPattern);
boolean doesExist = doesPartitionExist(part.getCatName(),
part.getDbName(), part.getTableName(), part.getValues());
if (doesExist && !ifNotExists) {
throw new MetaException("Partition already exists: " + part);
}
return !doesExist;
}
@Override
public boolean addPartitions(String catName, String dbName, String tblName,
PartitionSpecProxy partitionSpec, boolean ifNotExists)
throws InvalidObjectException, MetaException {
boolean success = false;
openTransaction();
try {
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
MTable table = this.getMTable(catName, dbName, tblName);
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(catName, dbName, tblName);
tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName);
}
if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + partitionSpec);
}
PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
int now = (int)(System.currentTimeMillis()/1000);
while (iterator.hasNext()) {
Partition part = iterator.next();
if (isValidPartition(part, ifNotExists)) {
MPartition mpart = convertToMPart(part, true);
pm.makePersistent(mpart);
if (tabGrants != null) {
for (MTablePrivilege tab : tabGrants) {
pm.makePersistent(new MPartitionPrivilege(tab.getPrincipalName(),
tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
}
}
if (tabColumnGrants != null) {
for (MTableColumnPrivilege col : tabColumnGrants) {
pm.makePersistent(new MPartitionColumnPrivilege(col.getPrincipalName(),
col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
}
}
}
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public boolean addPartition(Partition part) throws InvalidObjectException,
MetaException {
boolean success = false;
boolean commited = false;
try {
String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf);
MTable table = this.getMTable(catName, part.getDbName(), part.getTableName());
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(catName, part.getDbName(), part.getTableName());
tabColumnGrants = this.listTableAllColumnGrants(
catName, part.getDbName(), part.getTableName());
}
openTransaction();
MPartition mpart = convertToMPart(part, true);
pm.makePersistent(mpart);
int now = (int)(System.currentTimeMillis()/1000);
List<Object> toPersist = new ArrayList<>();
if (tabGrants != null) {
for (MTablePrivilege tab: tabGrants) {
MPartitionPrivilege partGrant = new MPartitionPrivilege(tab
.getPrincipalName(), tab.getPrincipalType(),
mpart, tab.getPrivilege(), now, tab.getGrantor(), tab
.getGrantorType(), tab.getGrantOption());
toPersist.add(partGrant);
}
}
if (tabColumnGrants != null) {
for (MTableColumnPrivilege col : tabColumnGrants) {
MPartitionColumnPrivilege partColumn = new MPartitionColumnPrivilege(col
.getPrincipalName(), col.getPrincipalType(), mpart, col
.getColumnName(), col.getPrivilege(), now, col.getGrantor(), col
.getGrantorType(), col.getGrantOption());
toPersist.add(partColumn);
}
}
// Persist accumulated grants even when only table-level grants exist.
if (CollectionUtils.isNotEmpty(toPersist)) {
pm.makePersistentAll(toPersist);
}
commited = commitTransaction();
success = true;
} finally {
if (!commited) {
rollbackTransaction();
}
}
return success;
}
@Override
public Partition getPartition(String catName, String dbName, String tableName,
List<String> part_vals) throws NoSuchObjectException, MetaException {
openTransaction();
Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals));
commitTransaction();
if(part == null) {
throw new NoSuchObjectException("partition values="
+ part_vals.toString());
}
part.setValues(part_vals);
return part;
}
private MPartition getMPartition(String catName, String dbName, String tableName, List<String> part_vals)
throws MetaException {
List<MPartition> mparts = null;
MPartition ret = null;
boolean commited = false;
Query query = null;
try {
openTransaction();
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
MTable mtbl = getMTable(catName, dbName, tableName);
if (mtbl == null) {
commited = commitTransaction();
return null;
}
// TODO: change the query to use part_vals instead of the name, which is
// redundant; callers of this often get part_vals out of name for no reason...
String name =
Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals);
query =
pm.newQuery(MPartition.class,
"table.tableName == t1 && table.database.name == t2 && partitionName == t3 " +
" && table.database.catalogName == t4");
query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, "
+ "java.lang.String t4");
mparts = (List<MPartition>) query.executeWithArray(tableName, dbName, name, catName);
pm.retrieveAll(mparts);
commited = commitTransaction();
// We need to compare the partition name with the requested name since some DBs
// (like MySQL and Derby) consider 'a' = 'a ' whereas others (like Postgres and
// Oracle) don't exhibit this problem.
if (CollectionUtils.isNotEmpty(mparts)) {
if (mparts.size() > 1) {
throw new MetaException(
"Expecting only one partition but more than one partition was found.");
} else {
MPartition mpart = mparts.get(0);
if (name.equals(mpart.getPartitionName())) {
ret = mpart;
} else {
throw new MetaException("Expecting a partition with name " + name
+ ", but metastore is returning a partition with name " + mpart.getPartitionName()
+ ".");
}
}
}
} finally {
rollbackAndCleanup(commited, query);
}
return ret;
}
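// Example of the check above (illustrative): on MySQL or Derby, a lookup for
// partition name "ds=2018-01-01" can also match a stored row named
// "ds=2018-01-01 " (trailing blank) because CHAR comparison there ignores trailing
// spaces; the explicit name.equals(mpart.getPartitionName()) comparison rejects
// such false matches instead of returning the wrong partition.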
/**
* Convert a Partition object into an MPartition, which is an object backed by the db.
* If the Partition's set of columns is the same as the parent table's AND useTableCD
* is true, then this partition's storage descriptor's column descriptor will point
* to the same one as the table's storage descriptor.
* @param part the partition to convert
* @param useTableCD whether to try to use the parent table's column descriptor.
* @return the model partition object
* @throws InvalidObjectException
* @throws MetaException
*/
private MPartition convertToMPart(Partition part, boolean useTableCD)
throws InvalidObjectException, MetaException {
if (part == null) {
return null;
}
MTable mt = getMTable(part.getCatName(), part.getDbName(), part.getTableName());
if (mt == null) {
throw new InvalidObjectException(
"Partition doesn't have a valid table or database name");
}
// If this partition's set of columns is the same as the parent table's,
// use the parent table's, so we do not create a duplicate column descriptor,
// thereby saving space
MStorageDescriptor msd;
if (useTableCD &&
mt.getSd() != null && mt.getSd().getCD() != null &&
mt.getSd().getCD().getCols() != null &&
part.getSd() != null &&
convertToFieldSchemas(mt.getSd().getCD().getCols()).
equals(part.getSd().getCols())) {
msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD());
} else {
msd = convertToMStorageDescriptor(part.getSd());
}
return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt
.getPartitionKeys()), part.getValues()), mt, part.getValues(), part
.getCreateTime(), part.getLastAccessTime(),
msd, part.getParameters());
}
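// Design note (added for clarity): reusing the table's MColumnDescriptor means each
// matching partition stores only a reference to the existing column descriptor
// instead of a private copy of every column definition, so per-partition metadata
// stays small when partition schemas match the table schema (the common case).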
private Partition convertToPart(MPartition mpart) throws MetaException {
if (mpart == null) {
return null;
}
Partition p = new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase()
.getName(), mpart.getTable().getTableName(), mpart.getCreateTime(),
mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()),
convertMap(mpart.getParameters()));
p.setCatName(mpart.getTable().getDatabase().getCatalogName());
return p;
}
private Partition convertToPart(String catName, String dbName, String tblName, MPartition mpart)
throws MetaException {
if (mpart == null) {
return null;
}
Partition p = new Partition(convertList(mpart.getValues()), dbName, tblName,
mpart.getCreateTime(), mpart.getLastAccessTime(),
convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters()));
p.setCatName(catName);
return p;
}
@Override
public boolean dropPartition(String catName, String dbName, String tableName,
List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
InvalidInputException {
boolean success = false;
try {
openTransaction();
MPartition part = getMPartition(catName, dbName, tableName, part_vals);
dropPartitionCommon(part);
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
throws MetaException, NoSuchObjectException {
if (CollectionUtils.isEmpty(partNames)) {
return;
}
boolean success = false;
openTransaction();
try {
// Delete all things.
dropPartitionGrantsNoTxn(catName, dbName, tblName, partNames);
dropPartitionAllColumnGrantsNoTxn(catName, dbName, tblName, partNames);
dropPartitionColumnStatisticsNoTxn(catName, dbName, tblName, partNames);
// CDs are reused; go through partition SDs, detach all CDs from SDs, then remove unused CDs.
for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(catName, dbName, tblName, partNames)) {
removeUnusedColumnDescriptor(mcd);
}
dropPartitionsNoTxn(catName, dbName, tblName, partNames);
if (!(success = commitTransaction())) {
throw new MetaException("Failed to drop partitions"); // Should not happen?
}
} finally {
if (!success) {
rollbackTransaction();
}
}
}
/**
* Drop an MPartition and cascade deletes (e.g., delete partition privilege grants,
* drop the storage descriptor cleanly, etc.)
*/
private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectException, MetaException,
InvalidObjectException, InvalidInputException {
boolean success = false;
try {
openTransaction();
if (part != null) {
List<MFieldSchema> schemas = part.getTable().getPartitionKeys();
List<String> colNames = new ArrayList<>();
for (MFieldSchema col: schemas) {
colNames.add(col.getName());
}
String partName = FileUtils.makePartName(colNames, part.getValues());
List<MPartitionPrivilege> partGrants = listPartitionGrants(
part.getTable().getDatabase().getCatalogName(),
part.getTable().getDatabase().getName(),
part.getTable().getTableName(),
Lists.newArrayList(partName));
if (CollectionUtils.isNotEmpty(partGrants)) {
pm.deletePersistentAll(partGrants);
}
List<MPartitionColumnPrivilege> partColumnGrants = listPartitionAllColumnGrants(
part.getTable().getDatabase().getCatalogName(),
part.getTable().getDatabase().getName(),
part.getTable().getTableName(),
Lists.newArrayList(partName));
if (CollectionUtils.isNotEmpty(partColumnGrants)) {
pm.deletePersistentAll(partColumnGrants);
}
String catName = part.getTable().getDatabase().getCatalogName();
String dbName = part.getTable().getDatabase().getName();
String tableName = part.getTable().getTableName();
// delete partition level column stats if it exists
try {
deletePartitionColumnStatistics(catName, dbName, tableName, partName, part.getValues(), null);
} catch (NoSuchObjectException e) {
LOG.info("No column statistics records found to delete");
}
preDropStorageDescriptor(part.getSd());
pm.deletePersistent(part);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public List<Partition> getPartitions(String catName, String dbName, String tableName,
int maxParts) throws MetaException, NoSuchObjectException {
return getPartitionsInternal(catName, dbName, tableName, maxParts, true, true);
}
protected List<Partition> getPartitionsInternal(String catName, String dbName, String tblName,
final int maxParts, boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
return new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
Integer max = (maxParts < 0) ? null : maxParts;
return directSql.getPartitions(catName, dbName, tblName, max);
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException {
QueryWrapper queryWrapper = new QueryWrapper();
try {
return convertToParts(listMPartitions(catName, dbName, tblName, maxParts, queryWrapper));
} finally {
queryWrapper.close();
}
}
}.run(false);
}
@Override
public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
short max, String userName, List<String> groupNames)
throws MetaException, InvalidObjectException {
boolean success = false;
QueryWrapper queryWrapper = new QueryWrapper();
try {
openTransaction();
List<MPartition> mparts = listMPartitions(catName, dbName, tblName, max, queryWrapper);
List<Partition> parts = new ArrayList<>(mparts.size());
if (CollectionUtils.isNotEmpty(mparts)) {
for (MPartition mpart : mparts) {
MTable mtbl = mpart.getTable();
Partition part = convertToPart(mpart);
parts.add(part);
if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
.getPartitionKeys()), part.getValues());
PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName,
tblName, partName, userName, groupNames);
part.setPrivileges(partAuth);
}
}
}
success = commitTransaction();
return parts;
} finally {
rollbackAndCleanup(success, queryWrapper);
}
}
@Override
public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
List<String> partVals, String user_name, List<String> group_names)
throws NoSuchObjectException, MetaException, InvalidObjectException {
boolean success = false;
try {
openTransaction();
MPartition mpart = getMPartition(catName, dbName, tblName, partVals);
if (mpart == null) {
commitTransaction();
throw new NoSuchObjectException("partition values="
+ partVals.toString());
}
Partition part = null;
MTable mtbl = mpart.getTable();
part = convertToPart(mpart);
if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
.getPartitionKeys()), partVals);
PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName,
tblName, partName, user_name, group_names);
part.setPrivileges(partAuth);
}
success = commitTransaction();
return part;
} finally {
if (!success) {
rollbackTransaction();
}
}
}
private List<Partition> convertToParts(List<MPartition> mparts) throws MetaException {
return convertToParts(mparts, null);
}
private List<Partition> convertToParts(List<MPartition> src, List<Partition> dest)
throws MetaException {
if (src == null) {
return dest;
}
if (dest == null) {
dest = new ArrayList<>(src.size());
}
for (MPartition mp : src) {
dest.add(convertToPart(mp));
Deadline.checkTimeout();
}
return dest;
}
private List<Partition> convertToParts(String catName, String dbName, String tblName, List<MPartition> mparts)
throws MetaException {
List<Partition> parts = new ArrayList<>(mparts.size());
for (MPartition mp : mparts) {
parts.add(convertToPart(catName, dbName, tblName, mp));
Deadline.checkTimeout();
}
return parts;
}
// TODO:pc implement max
@Override
public List<String> listPartitionNames(String catName, String dbName, String tableName,
short max) throws MetaException {
List<String> pns = null;
boolean success = false;
try {
openTransaction();
LOG.debug("Executing getPartitionNames");
pns = getPartitionNamesNoTxn(catName, dbName, tableName, max);
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return pns;
}
private String extractPartitionKey(FieldSchema key, List<FieldSchema> pkeys) {
StringBuilder buffer = new StringBuilder(256);
assert pkeys.size() >= 1;
String partKey = "/" + key.getName() + "=";
// Table is partitioned by single key
if (pkeys.size() == 1 && (pkeys.get(0).getName().matches(key.getName()))) {
buffer.append("partitionName.substring(partitionName.indexOf(\"")
.append(key.getName()).append("=\") + ").append(key.getName().length() + 1)
.append(")");
// First partition key - anything between key= and first /
} else if ((pkeys.get(0).getName().matches(key.getName()))) {
buffer.append("partitionName.substring(partitionName.indexOf(\"")
.append(key.getName()).append("=\") + ").append(key.getName().length() + 1).append(", ")
.append("partitionName.indexOf(\"/\")")
.append(")");
// Last partition key - anything between /key= and end
} else if ((pkeys.get(pkeys.size() - 1).getName().matches(key.getName()))) {
buffer.append("partitionName.substring(partitionName.indexOf(\"")
.append(partKey).append("\") + ").append(partKey.length())
.append(")");
// Intermediate key - anything between /key= and the following /
} else {
buffer.append("partitionName.substring(partitionName.indexOf(\"")
.append(partKey).append("\") + ").append(partKey.length()).append(", ")
.append("partitionName.indexOf(\"/\", partitionName.indexOf(\"").append(partKey)
.append("\") + 1))");
}
LOG.info("Query for Key:" + key.getName() + " is :" + buffer);
return buffer.toString();
}
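// Worked example (illustrative): for a table partitioned by (ds, hr) with partition
// names like "ds=2018-01-01/hr=12":
//  - first key ds -> partitionName.substring(partitionName.indexOf("ds=") + 3,
//                    partitionName.indexOf("/"))            yields "2018-01-01"
//  - last key hr  -> partitionName.substring(partitionName.indexOf("/hr=") + 4)
//                                                           yields "12"
// Note the key matching above uses String.matches, so a key name containing regex
// metacharacters could match unintended keys.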
@Override
public PartitionValuesResponse listPartitionValues(String catName, String dbName,
String tableName, List<FieldSchema> cols,
boolean applyDistinct, String filter,
boolean ascending, List<FieldSchema> order,
long maxParts) throws MetaException {
catName = normalizeIdentifier(catName);
dbName = dbName.toLowerCase().trim();
tableName = tableName.toLowerCase().trim();
try {
if (filter == null || filter.isEmpty()) {
PartitionValuesResponse response = getDistinctValuesForPartitionsNoTxn(catName, dbName,
tableName, cols, applyDistinct, maxParts);
LOG.info("Number of records fetched: {}", response.getPartitionValues().size());
return response;
} else {
PartitionValuesResponse response =
extractPartitionNamesByFilter(catName, dbName, tableName, filter, cols, ascending, maxParts);
if (response != null && response.getPartitionValues() != null) {
LOG.info("Number of records fetched with filter: {}", response.getPartitionValues().size());
}
return response;
}
} catch (Exception t) {
LOG.error("Exception in ORM", t);
throw new MetaException("Error retrieving partition values: " + t);
}
}
private PartitionValuesResponse extractPartitionNamesByFilter(
String catName, String dbName, String tableName, String filter, List<FieldSchema> cols,
boolean ascending, long maxParts)
throws MetaException, NoSuchObjectException {
LOG.info("Table: {} filter: \"{}\" cols: {}",
getCatalogQualifiedTableName(catName, dbName, tableName), filter, cols);
List<String> partitionNames = null;
List<Partition> partitions = null;
Table tbl = getTable(catName, dbName, tableName);
try {
// Get partitions by name - ascending or descending
partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending,
maxParts);
} catch (MetaException e) {
LOG.warn("Querying by partition names failed, trying out with partition objects, filter: {}", filter);
}
if (partitionNames == null) {
partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts);
}
if (partitions != null) {
partitionNames = new ArrayList<>(partitions.size());
for (Partition partition : partitions) {
// Check for NULL's just to be safe
if (tbl.getPartitionKeys() != null && partition.getValues() != null) {
partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), partition.getValues()));
}
}
}
if (partitionNames == null && partitions == null) {
throw new MetaException("Cannot obtain list of partitions by filter:\"" + filter +
"\" for " + getCatalogQualifiedTableName(catName, dbName, tableName));
}
if (!ascending) {
Collections.sort(partitionNames, Collections.reverseOrder());
}
// Return proper response
PartitionValuesResponse response = new PartitionValuesResponse();
response.setPartitionValues(new ArrayList<>(partitionNames.size()));
LOG.info("Converting responses to Partition values for items: {}", partitionNames.size());
for (String partName : partitionNames) {
ArrayList<String> vals = new ArrayList<>(Collections.nCopies(tbl.getPartitionKeys().size(), (String) null));
PartitionValuesRow row = new PartitionValuesRow();
Warehouse.makeValsFromName(partName, vals);
for (String value : vals) {
row.addToRow(value);
}
response.addToPartitionValues(row);
}
return response;
}
private List<String> getPartitionNamesByFilter(String catName, String dbName, String tableName,
String filter, boolean ascending, long maxParts)
throws MetaException {
boolean success = false;
List<String> partNames = new ArrayList<>();
try {
openTransaction();
LOG.debug("Executing getPartitionNamesByFilter");
catName = normalizeIdentifier(catName);
dbName = dbName.toLowerCase();
tableName = tableName.toLowerCase();
MTable mtable = getMTable(catName, dbName, tableName);
if( mtable == null ) {
// To be consistent with the behavior of listPartitionNames, if the
// table or db does not exist, we return an empty list
return partNames;
}
Map<String, Object> params = new HashMap<>();
String queryFilterString = makeQueryFilterString(catName, dbName, mtable, filter, params);
Query query = pm.newQuery(
"select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+ "where " + queryFilterString);
if (maxParts >= 0) {
//User specified a row limit, set it on the Query
query.setRange(0, maxParts);
}
LOG.debug("Filter specified is {}, JDOQL filter is {}", filter,
queryFilterString);
LOG.debug("Parms is {}", params);
String parameterDeclaration = makeParameterDeclarationStringObj(params);
query.declareParameters(parameterDeclaration);
if (ascending) {
query.setOrdering("partitionName ascending");
} else {
query.setOrdering("partitionName descending");
}
query.setResult("partitionName");
Collection<String> names = (Collection<String>) query.executeWithMap(params);
partNames = new ArrayList<>(names);
LOG.debug("Done executing query for getPartitionNamesByFilter");
success = commitTransaction();
LOG.debug("Done retrieving all objects for getPartitionNamesByFilter, size: {}", partNames.size());
query.closeAll();
} finally {
if (!success) {
rollbackTransaction();
}
}
return partNames;
}
private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(
String catName, String dbName, String tableName, List<FieldSchema> cols,
boolean applyDistinct, long maxParts)
throws MetaException {
try {
openTransaction();
Query q = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+ "where table.database.name == t1 && table.database.catalogName == t2 && " +
"table.tableName == t3 ");
q.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
// TODO: Ordering seems to affect the distinctness, needs checking, disabling.
/*
if (ascending) {
q.setOrdering("partitionName ascending");
} else {
q.setOrdering("partitionName descending");
}
*/
if (maxParts > 0) {
q.setRange(0, maxParts);
}
StringBuilder partValuesSelect = new StringBuilder(256);
if (applyDistinct) {
partValuesSelect.append("DISTINCT ");
}
List<FieldSchema> partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys();
for (FieldSchema key : cols) {
partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", ");
}
partValuesSelect.setLength(partValuesSelect.length() - 2);
LOG.info("Columns to be selected from Partitions: {}", partValuesSelect);
q.setResult(partValuesSelect.toString());
PartitionValuesResponse response = new PartitionValuesResponse();
response.setPartitionValues(new ArrayList<>());
if (cols.size() > 1) {
List<Object[]> results = (List<Object[]>) q.execute(dbName, catName, tableName);
for (Object[] row : results) {
PartitionValuesRow rowResponse = new PartitionValuesRow();
for (Object columnValue : row) {
rowResponse.addToRow((String) columnValue);
}
response.addToPartitionValues(rowResponse);
}
} else {
List<String> results = (List<String>) q.execute(dbName, catName, tableName);
for (Object row : results) {
PartitionValuesRow rowResponse = new PartitionValuesRow();
rowResponse.addToRow((String) row);
response.addToPartitionValues(rowResponse);
}
}
q.closeAll();
return response;
} finally {
commitTransaction();
}
}
private List<String> getPartitionNamesNoTxn(String catName, String dbName, String tableName, short max) {
List<String> pns = new ArrayList<>();
if (max == 0) {
return pns;
}
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
Query query =
pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+ "where table.database.name == t1 && table.tableName == t2 && table.database.catalogName == t3 "
+ "order by partitionName asc");
query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
query.setResult("partitionName");
if (max > 0) {
query.setRange(0, max);
}
Collection<String> names = (Collection<String>) query.execute(dbName, tableName, catName);
pns.addAll(names);
if (query != null) {
query.closeAll();
}
return pns;
}
/**
* Retrieves a Collection of partition-related results from the database that match
* the partial specification given for a specific table.
* @param catName the name of the catalog
* @param dbName the name of the database
* @param tableName the name of the table
* @param part_vals the partial specification values
* @param max_parts the maximum number of partitions to return
* @param resultsCol the metadata column of the data to return, e.g. partitionName, etc.
* if resultsCol is empty or null, a collection of MPartition objects is returned
* @throws NoSuchObjectException
* @return a Collection of partition-related items from the db that match the partial spec
* for a table. The type of each item in the collection corresponds to the column
* you want results for. E.g., if resultsCol is partitionName, the Collection
* has types of String, and if resultsCol is null, the types are MPartition.
*/
private Collection getPartitionPsQueryResults(String catName, String dbName, String tableName,
List<String> part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper)
throws MetaException, NoSuchObjectException {
catName = normalizeIdentifier(catName);
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
Table table = getTable(catName, dbName, tableName);
if (table == null) {
throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbName, tableName)
+ " table not found");
}
String partNameMatcher = MetaStoreUtils.makePartNameMatcher(table, part_vals);
Query query = queryWrapper.query = pm.newQuery(MPartition.class);
StringBuilder queryFilter = new StringBuilder("table.database.name == dbName");
queryFilter.append(" && table.database.catalogName == catName");
queryFilter.append(" && table.tableName == tableName");
queryFilter.append(" && partitionName.matches(partialRegex)");
query.setFilter(queryFilter.toString());
query.declareParameters("java.lang.String dbName, java.lang.String catName, "
+ "java.lang.String tableName, java.lang.String partialRegex");
if (max_parts >= 0) {
// User specified a row limit, set it on the Query
query.setRange(0, max_parts);
}
if (resultsCol != null && !resultsCol.isEmpty()) {
query.setResult(resultsCol);
}
return (Collection) query.executeWithArray(dbName, catName, tableName, partNameMatcher);
}
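// Example (illustrative, assuming MetaStoreUtils.makePartNameMatcher fills
// unspecified trailing keys with a ".*" wildcard): for a table partitioned by
// (ds, hr) and part_vals = ["2018-01-01"], the matcher is roughly
// "ds=2018-01-01/hr=.*", so the partitionName.matches(partialRegex) filter above
// returns every hr sub-partition of that date. With resultsCol = "partitionName"
// the collection holds Strings; with null it holds MPartition objects.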
@Override
public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
List<String> part_vals, short max_parts, String userName, List<String> groupNames)
throws MetaException, InvalidObjectException, NoSuchObjectException {
List<Partition> partitions = new ArrayList<>();
boolean success = false;
QueryWrapper queryWrapper = new QueryWrapper();
try {
openTransaction();
LOG.debug("executing listPartitionNamesPsWithAuth");
Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name,
part_vals, max_parts, null, queryWrapper);
MTable mtbl = getMTable(catName, db_name, tbl_name);
for (Object o : parts) {
Partition part = convertToPart((MPartition) o);
//set auth privileges
if (null != userName && null != groupNames &&
"TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
.getPartitionKeys()), part.getValues());
PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name,
tbl_name, partName, userName, groupNames);
part.setPrivileges(partAuth);
}
partitions.add(part);
}
success = commitTransaction();
} finally {
rollbackAndCleanup(success, queryWrapper);
}
return partitions;
}
@Override
public List<String> listPartitionNamesPs(String catName, String dbName, String tableName,
List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException {
List<String> partitionNames = new ArrayList<>();
boolean success = false;
QueryWrapper queryWrapper = new QueryWrapper();
try {
openTransaction();
LOG.debug("Executing listPartitionNamesPs");
Collection names = getPartitionPsQueryResults(catName, dbName, tableName,
part_vals, max_parts, "partitionName", queryWrapper);
partitionNames.addAll(names);
success = commitTransaction();
} finally {
rollbackAndCleanup(success, queryWrapper);
}
return partitionNames;
}
// TODO:pc implement max
private List<MPartition> listMPartitions(String catName, String dbName, String tableName,
int max, QueryWrapper queryWrapper) {
boolean success = false;
List<MPartition> mparts = null;
try {
openTransaction();
LOG.debug("Executing listMPartitions");
dbName = normalizeIdentifier(dbName);
tableName = normalizeIdentifier(tableName);
Query query = queryWrapper.query = pm.newQuery(MPartition.class,
"table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3");
query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
query.setOrdering("partitionName ascending");
if (max > 0) {
query.setRange(0, max);
}
mparts = (List<MPartition>) query.execute(tableName, dbName, catName);
LOG.debug("Done executing query for listMPartitions");
pm.retrieveAll(mparts);
success = commitTransaction();
LOG.debug("Done retrieving all objects for listMPartitions {}", mparts);
} finally {
if (!success) {
rollbackTransaction();
}
}
return mparts;
}
@Override
public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
List<String> partNames) throws MetaException, NoSuchObjectException {
return getPartitionsByNamesInternal(catName, dbName, tblName, partNames, true, true);
}
protected List<Partition> getPartitionsByNamesInternal(String catName, String dbName,
String tblName,
final List<String> partNames,
boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
return new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames);
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
return getPartitionsViaOrmFilter(catName, dbName, tblName, partNames);
}
}.run(false);
}
@Override
public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
return getPartitionsByExprInternal(
catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true);
}
protected boolean getPartitionsByExprInternal(String catName, String dbName, String tblName, final byte[] expr,
final String defaultPartitionName, final short maxParts, List<Partition> result,
boolean allowSql, boolean allowJdo) throws TException {
assert result != null;
final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false);
result.addAll(new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
// If we have some sort of expression tree, try SQL filter pushdown.
List<Partition> result = null;
if (exprTree != null) {
SqlFilterForPushdown filter = new SqlFilterForPushdown();
if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter)) {
return directSql.getPartitionsViaSqlFilter(filter, null);
}
}
// We couldn't do SQL filter pushdown. Get names via normal means.
List<String> partNames = new LinkedList<>();
hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames);
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
// If we have some sort of expression tree, try JDOQL filter pushdown.
List<Partition> result = null;
if (exprTree != null) {
result = getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false);
}
if (result == null) {
// We couldn't do JDOQL filter pushdown. Get names via normal means.
List<String> partNames = new ArrayList<>();
hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
result = getPartitionsViaOrmFilter(catName, dbName, tblName, partNames);
}
return result;
}
}.run(true));
return hasUnknownPartitions.get();
}
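// Note (added for clarity): resolution order in the helper above is (1) direct-SQL
// filter pushdown when the expression tree can be compiled to SQL, (2) JDOQL filter
// pushdown on the JDO path, and (3) as a fallback, fetching all partition names and
// pruning them client-side via expressionProxy. Only that fallback can set
// hasUnknownPartitions, meaning the expression could not be fully evaluated against
// every candidate name in the store.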
/**
* Gets the partition names from a table, pruned using an expression.
* @param table Table.
* @param expr Expression.
* @param defaultPartName Default partition name from job config, if any.
* @param maxParts Maximum number of partition names to return.
* @param result The resulting names.
* @return Whether the result contains any unknown partitions.
*/
private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
String defaultPartName, short maxParts, List<String> result) throws MetaException {
result.addAll(getPartitionNamesNoTxn(table.getCatName(),
table.getDbName(), table.getTableName(), maxParts));
if (defaultPartName == null || defaultPartName.isEmpty()) {
defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
}
return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName, result);
}
/**
* Gets partition names from the table via ORM (JDOQL) filter pushdown.
* @param table The table.
* @param tree The expression tree from which JDOQL filter will be made.
* @param maxParts Maximum number of partitions to return.
* @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown by a client
* (old hive client or non-hive one); if it was and we fail to create a filter, we will throw.
* @return Resulting partitions. Can be null if isValidatedFilter is false, and
* there was error deriving the JDO filter.
*/
private List<Partition> getPartitionsViaOrmFilter(Table table, ExpressionTree tree,
short maxParts, boolean isValidatedFilter) throws MetaException {
Map<String, Object> params = new HashMap<>();
String jdoFilter =
makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, params, isValidatedFilter);
if (jdoFilter == null) {
assert !isValidatedFilter;
return null;
}
Query query = pm.newQuery(MPartition.class, jdoFilter);
if (maxParts >= 0) {
// User specified a row limit, set it on the Query
query.setRange(0, maxParts);
}
String parameterDeclaration = makeParameterDeclarationStringObj(params);
query.declareParameters(parameterDeclaration);
query.setOrdering("partitionName ascending");
@SuppressWarnings("unchecked")
List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
LOG.debug("Done executing query for getPartitionsViaOrmFilter");
pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names?
LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter");
List<Partition> results = convertToParts(mparts);
query.closeAll();
return results;
}
private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter)
throws MetaException {
Map<String, Object> params = new HashMap<>();
String jdoFilter = makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree,
params, isValidatedFilter);
if (jdoFilter == null) {
assert !isValidatedFilter;
return null;
}
Query query = pm.newQuery(
"select count(partitionName) from org.apache.hadoop.hive.metastore.model.MPartition"
);
query.setFilter(jdoFilter);
String parameterDeclaration = makeParameterDeclarationStringObj(params);
query.declareParameters(parameterDeclaration);
Long result = (Long) query.executeWithMap(params);
query.closeAll();
return result.intValue();
}
/**
* Gets partition names from the table via ORM (JDOQL) name filter.
* @param dbName Database name.
* @param tblName Table name.
* @param partNames Partition names to get the objects for.
* @return Resulting partitions.
*/
private List<Partition> getPartitionsViaOrmFilter(String catName,
String dbName, String tblName, List<String> partNames) throws MetaException {
if (partNames.isEmpty()) {
return new ArrayList<>();
}
ObjectPair<Query, Map<String, String>> queryWithParams =
getPartQueryWithParams(catName, dbName, tblName, partNames);
Query query = queryWithParams.getFirst();
query.setResultClass(MPartition.class);
query.setClass(MPartition.class);
query.setOrdering("partitionName ascending");
@SuppressWarnings("unchecked")
List<MPartition> mparts = (List<MPartition>) query.executeWithMap(queryWithParams.getSecond());
List<Partition> partitions = convertToParts(catName, dbName, tblName, mparts);
if (query != null) {
query.closeAll();
}
return partitions;
}
private void dropPartitionsNoTxn(String catName, String dbName, String tblName, List<String> partNames) {
ObjectPair<Query, Map<String, String>> queryWithParams =
getPartQueryWithParams(catName, dbName, tblName, partNames);
Query query = queryWithParams.getFirst();
query.setClass(MPartition.class);
long deleted = query.deletePersistentAll(queryWithParams.getSecond());
LOG.debug("Deleted {} partition from store", deleted);
query.closeAll();
}
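// Note (added for clarity): deletePersistentAll removes the matched MPartition rows
// (and any fields mapped as dependent in package.jdo) in one bulk operation; grants,
// column statistics and column-descriptor detachment are handled separately by
// dropPartitions above, which is why this helper can remain a plain bulk delete.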
/**
* Detaches column descriptors from storage descriptors and returns the set of unique CDs
* thus detached. This is done before dropping partitions because CDs are shared between
* SDs; we first remove the links from the SDs being deleted, then check the returned CDs
* to see whether they are still referenced by other SDs.
*/
private HashSet<MColumnDescriptor> detachCdsFromSdsNoTxn(
String catName, String dbName, String tblName, List<String> partNames) {
ObjectPair<Query, Map<String, String>> queryWithParams =
getPartQueryWithParams(catName, dbName, tblName, partNames);
Query query = queryWithParams.getFirst();
query.setClass(MPartition.class);
query.setResult("sd");
@SuppressWarnings("unchecked")
List<MStorageDescriptor> sds = (List<MStorageDescriptor>) query.executeWithMap(
queryWithParams.getSecond());
HashSet<MColumnDescriptor> candidateCds = new HashSet<>();
for (MStorageDescriptor sd : sds) {
if (sd != null && sd.getCD() != null) {
candidateCds.add(sd.getCD());
sd.setCD(null);
}
}
query.closeAll();
return candidateCds;
}
private ObjectPair<Query, Map<String, String>> getPartQueryWithParams(
String catName, String dbName, String tblName, List<String> partNames) {
StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 &&" +
" table.database.catalogName == t3 && (");
int n = 0;
Map<String, String> params = new HashMap<>();
for (Iterator<String> itr = partNames.iterator(); itr.hasNext();) {
String pn = "p" + n;
n++;
String part = itr.next();
params.put(pn, part);
sb.append("partitionName == ").append(pn);
sb.append(" || ");
}
sb.setLength(sb.length() - 4); // remove the last " || "
sb.append(')');
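// Example of the filter built above, for two partition names:
//   table.tableName == t1 && table.database.name == t2 &&
//   table.database.catalogName == t3 && (partitionName == p0 || partitionName == p1)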
Query query = pm.newQuery();
query.setFilter(sb.toString());
LOG.debug(" JDOQL filter is {}", sb);
params.put("t1", normalizeIdentifier(tblName));
params.put("t2", normalizeIdentifier(dbName));
params.put("t3", normalizeIdentifier(catName));
query.declareParameters(makeParameterDeclarationString(params));
return new ObjectPair<>(query, params);
}
@Override
public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
String filter, short maxParts) throws MetaException, NoSuchObjectException {
return getPartitionsByFilterInternal(catName, dbName, tblName, filter, maxParts, true, true);
}
/** Helper class for running a get-operation with transaction handling, direct SQL (with JDO fallback), and perf logging. */
@VisibleForTesting
public abstract class GetHelper<T> {
private final boolean isInTxn, doTrace, allowJdo;
private boolean doUseDirectSql;
private long start;
private Table table;
protected final String catName, dbName, tblName;
private boolean success = false;
protected T results = null;
public GetHelper(String catalogName, String dbName, String tblName,
boolean allowSql, boolean allowJdo) throws MetaException {
assert allowSql || allowJdo;
this.allowJdo = allowJdo;
this.catName = normalizeIdentifier(catalogName);
this.dbName = normalizeIdentifier(dbName);
if (tblName != null) {
this.tblName = normalizeIdentifier(tblName);
} else {
// tblName can be null in cases of the Helper being used at a higher
// abstraction level, such as with databases
this.tblName = null;
this.table = null;
}
this.doTrace = LOG.isDebugEnabled();
this.isInTxn = isActiveTransaction();
// SQL usage inside a larger transaction (e.g. drop table) may not be desirable because
// some databases (e.g. Postgres) abort the entire transaction when any query fails, so
// the fallback from failed SQL to JDO is not possible.
boolean isConfigEnabled = MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)
&& (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL_DDL) || !isInTxn);
if (isConfigEnabled && directSql == null) {
dbType = determineDatabaseProduct();
directSql = new MetaStoreDirectSql(pm, getConf(), "");
}
if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) {
throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken.
}
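// Net effect: direct SQL is attempted only if the caller allows it, the config
// enables it (subject to the in-transaction DDL check above), and the datastore
// is compatible.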
this.doUseDirectSql = allowSql && isConfigEnabled && directSql.isCompatibleDatastore();
}
protected boolean canUseDirectSql(GetHelper<T> ctx) throws MetaException {
return true; // By default, assume we can use direct SQL - that's kind of the point.
}
protected abstract String describeResult();
protected abstract T getSqlResult(GetHelper<T> ctx) throws MetaException;
protected abstract T getJdoResult(
GetHelper<T> ctx) throws MetaException, NoSuchObjectException;
public T run(boolean initTable) throws MetaException, NoSuchObjectException {
try {
start(initTable);
if (doUseDirectSql) {
try {
directSql.prepareTxn();
this.results = getSqlResult(this);
} catch (Exception ex) {
handleDirectSqlError(ex);
}
}
// Note that this will be invoked in 2 cases:
// 1) DirectSQL was disabled to start with;
// 2) DirectSQL threw and was disabled in handleDirectSqlError.
if (!doUseDirectSql) {
this.results = getJdoResult(this);
}
return commit();
} catch (NoSuchObjectException ex) {
throw ex;
} catch (MetaException ex) {
throw ex;
} catch (Exception ex) {
LOG.error("", ex);
throw new MetaException(ex.getMessage());
} finally {
close();
}
}
private void start(boolean initTable) throws MetaException, NoSuchObjectException {
start = doTrace ? System.nanoTime() : 0;
openTransaction();
if (initTable && (tblName != null)) {
table = ensureGetTable(catName, dbName, tblName);
}
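// Give subclasses a chance to veto direct SQL for this particular call
// (e.g. when the filter cannot be pushed down to SQL).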
doUseDirectSql = doUseDirectSql && canUseDirectSql(this);
}
private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException {
String message = null;
try {
message = generateShorterMessage(ex);
} catch (Throwable t) {
message = ex.toString() + "; error building a better message: " + t.getMessage();
}
LOG.warn(message); // Don't log the exception, people just get confused.
if (LOG.isDebugEnabled()) {
LOG.debug("Full DirectSQL callstack for debugging (note: this is not an error)", ex);
}
if (!allowJdo) {
if (ex instanceof MetaException) {
throw (MetaException)ex;
}
throw new MetaException(ex.getMessage());
}
if (!isInTxn) {
JDOException rollbackEx = null;
try {
rollbackTransaction();
} catch (JDOException jex) {
rollbackEx = jex;
}
if (rollbackEx != null) {
// Datanucleus propagates some pointless exceptions and rolls back in the finally.
if (currentTransaction != null && currentTransaction.isActive()) {
throw rollbackEx; // Throw if the tx wasn't rolled back.
}
LOG.info("Ignoring exception, rollback succeeded: " + rollbackEx.getMessage());
}
start = doTrace ? System.nanoTime() : 0;
openTransaction();
if (table != null) {
table = ensureGetTable(catName, dbName, tblName);
}
} else {
start = doTrace ? System.nanoTime() : 0;
}
if (directSqlErrors != null) {
directSqlErrors.inc();
}
doUseDirectSql = false;
}
private String generateShorterMessage(Exception ex) {
StringBuilder message = new StringBuilder(
"Falling back to ORM path due to direct SQL failure (this is not an error): ");
Throwable t = ex;
StackTraceElement[] prevStack = null;
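// Walk the cause chain, appending for each throwable only the stack frames that
// are not shared with the previous throwable's stack; the common tail adds no
// information.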
while (t != null) {
message.append(t.getMessage());
StackTraceElement[] stack = t.getStackTrace();
int uniqueFrames = stack.length - 1;
if (prevStack != null) {
int n = prevStack.length - 1;
while (uniqueFrames >= 0 && n >= 0 && stack[uniqueFrames].equals(prevStack[n])) {
uniqueFrames--; n--;
}
}
for (int i = 0; i <= uniqueFrames; ++i) {
StackTraceElement ste = stack[i];
message.append(" at ").append(ste);
if (ste.getMethodName() != null && ste.getMethodName().contains("getSqlResult")
&& (ste.getFileName() == null || ste.getFileName().contains("ObjectStore"))) {
break;
}
}
prevStack = stack;
t = t.getCause();
if (t != null) {
message.append(";\n Caused by: ");
}
}
return message.toString();
}
private T commit() {
success = commitTransaction();
if (doTrace) {
double time = ((System.nanoTime() - start) / 1000000.0);
String result = describeResult();
String retrieveType = doUseDirectSql ? "SQL" : "ORM";
LOG.debug("{} retrieved using {} in {}ms", result, retrieveType, time);
}
return results;
}
private void close() {
if (!success) {
rollbackTransaction();
}
}
public Table getTable() {
return table;
}
}
private abstract class GetListHelper<T> extends GetHelper<List<T>> {
public GetListHelper(String catName, String dbName, String tblName, boolean allowSql,
boolean allowJdo) throws MetaException {
super(catName, dbName, tblName, allowSql, allowJdo);
}
@Override
protected String describeResult() {
return results.size() + " entries";
}
}
@VisibleForTesting
public abstract class GetDbHelper extends GetHelper<Database> {
/**
* GetHelper for returning db info using directSql/JDO.
* @param catalogName The catalog name.
* @param dbName The database name.
* @param allowSql Whether we allow DirectSQL to perform this query.
* @param allowJdo Whether we allow ORM to perform this query.
* @throws MetaException
*/
public GetDbHelper(String catalogName, String dbName, boolean allowSql, boolean allowJdo)
throws MetaException {
super(catalogName, dbName, null, allowSql, allowJdo);
}
@Override
protected String describeResult() {
return "db details for db ".concat(dbName);
}
}
private abstract class GetStatHelper extends GetHelper<ColumnStatistics> {
public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql,
boolean allowJdo) throws MetaException {
super(catalogName, dbName, tblName, allowSql, allowJdo);
}
@Override
protected String describeResult() {
return "statistics for " + (results == null ? 0 : results.getStatsObjSize()) + " columns";
}
}
@Override
public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
String filter) throws MetaException, NoSuchObjectException {
final ExpressionTree exprTree = com.facebook.presto.hive.$internal.org.apache.commons.lang.StringUtils.isNotEmpty(filter)
? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
return new GetHelper<Integer>(catName, dbName, tblName, true, true) {
private final SqlFilterForPushdown filter = new SqlFilterForPushdown();
@Override
protected String describeResult() {
return "Partition count";
}
@Override
protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws MetaException {
return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter);
}
@Override
protected Integer getSqlResult(GetHelper<Integer> ctx) throws MetaException {
return directSql.getNumPartitionsViaSqlFilter(filter);
}
@Override
protected Integer getJdoResult(
GetHelper<Integer> ctx) throws MetaException, NoSuchObjectException {
return getNumPartitionsViaOrmFilter(ctx.getTable(), exprTree, true);
}
}.run(true);
}
@Override
public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
byte[] expr) throws MetaException, NoSuchObjectException {
final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
final byte[] tempExpr = expr; // Need to be final to pass it to an inner class
return new GetHelper<Integer>