/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.metastore;
import static com.facebook.presto.hive.$internal.org.apache.commons.lang.StringUtils.join;
import java.io.IOException;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
import javax.jdo.JDODataStoreException;
import javax.jdo.JDOHelper;
import javax.jdo.JDOObjectNotFoundException;
import javax.jdo.PersistenceManager;
import javax.jdo.PersistenceManagerFactory;
import javax.jdo.Query;
import javax.jdo.Transaction;
import javax.jdo.datastore.DataStoreCache;
import javax.jdo.identity.IntIdentity;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognitionException;
import com.facebook.presto.hive.$internal.org.apache.commons.logging.Log;
import com.facebook.presto.hive.$internal.org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.FunctionType;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.api.ResourceType;
import org.apache.hadoop.hive.metastore.api.ResourceUri;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
import org.apache.hadoop.hive.metastore.model.MDatabase;
import org.apache.hadoop.hive.metastore.model.MDelegationToken;
import org.apache.hadoop.hive.metastore.model.MFieldSchema;
import org.apache.hadoop.hive.metastore.model.MFunction;
import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
import org.apache.hadoop.hive.metastore.model.MIndex;
import org.apache.hadoop.hive.metastore.model.MMasterKey;
import org.apache.hadoop.hive.metastore.model.MNotificationLog;
import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
import org.apache.hadoop.hive.metastore.model.MOrder;
import org.apache.hadoop.hive.metastore.model.MPartition;
import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
import org.apache.hadoop.hive.metastore.model.MResourceUri;
import org.apache.hadoop.hive.metastore.model.MRole;
import org.apache.hadoop.hive.metastore.model.MRoleMap;
import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
import org.apache.hadoop.hive.metastore.model.MStringList;
import org.apache.hadoop.hive.metastore.model.MTable;
import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
import org.apache.hadoop.hive.metastore.model.MType;
import org.apache.hadoop.hive.metastore.model.MVersionTable;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.ANTLRNoCaseStringStream;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
import org.apache.hadoop.hive.metastore.parser.FilterLexer;
import org.apache.hadoop.hive.metastore.parser.FilterParser;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
import org.apache.hadoop.util.StringUtils;
import org.apache.hive.common.util.HiveStringUtils;
import org.apache.thrift.TException;
import org.datanucleus.store.rdbms.exceptions.MissingTableException;
import com.facebook.presto.hive.$internal.com.google.common.collect.Lists;
/**
* This class is the interface between the application logic and the database
* store that contains the objects. Refrain from putting any logic in model.M* objects
* or in this file, as the former could be auto-generated and this class would need
* to be made into an interface that can read both from a database and a
* filestore.
*/
public class ObjectStore implements RawStore, Configurable {
private static Properties prop = null;
private static PersistenceManagerFactory pmf = null;
private static Lock pmfPropLock = new ReentrantLock();
/**
* Verify the schema only once per JVM since the db connection info is static
*/
private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
private static final Log LOG = LogFactory.getLog(ObjectStore.class.getName());
private static enum TXN_STATUS {
NO_STATE, OPEN, COMMITED, ROLLBACK
}
private static final Map<String, Class> PINCLASSMAP;
private static final String HOSTNAME;
private static final String USER;
static {
Map<String, Class> map = new HashMap<String, Class>();
map.put("table", MTable.class);
map.put("storagedescriptor", MStorageDescriptor.class);
map.put("serdeinfo", MSerDeInfo.class);
map.put("partition", MPartition.class);
map.put("database", MDatabase.class);
map.put("type", MType.class);
map.put("fieldschema", MFieldSchema.class);
map.put("order", MOrder.class);
PINCLASSMAP = Collections.unmodifiableMap(map);
String hostname = "UNKNOWN";
try {
InetAddress clientAddr = InetAddress.getLocalHost();
hostname = clientAddr.getHostAddress();
} catch (IOException e) {
}
HOSTNAME = hostname;
String user = System.getenv("USER");
if (user == null) {
USER = "UNKNOWN";
} else {
USER = user;
}
}
private boolean isInitialized = false;
private PersistenceManager pm = null;
private MetaStoreDirectSql directSql = null;
private PartitionExpressionProxy expressionProxy = null;
private Configuration hiveConf;
int openTrasactionCalls = 0;
private Transaction currentTransaction = null;
private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
private Pattern partitionValidationPattern;
public ObjectStore() {
}
@Override
public Configuration getConf() {
return hiveConf;
}
/**
* Called whenever this object is instantiated using ReflectionUtils, and also
* on connection retries. In cases of connection retries, conf will usually
* contain modified values.
*/
@Override
@SuppressWarnings("nls")
public void setConf(Configuration conf) {
// Although an instance of ObjectStore is accessed by one thread, there may
// be many threads with ObjectStore instances. So the static variables
// pmf and prop need to be protected with locks.
pmfPropLock.lock();
try {
isInitialized = false;
hiveConf = conf;
Properties propsFromConf = getDataSourceProps(conf);
boolean propsChanged = !propsFromConf.equals(prop);
if (propsChanged) {
pmf = null;
prop = null;
}
assert(!isActiveTransaction());
shutdown();
// Always want to re-create pm as we don't know if it were created by the
// most recent instance of the pmf
pm = null;
directSql = null;
expressionProxy = null;
openTrasactionCalls = 0;
currentTransaction = null;
transactionStatus = TXN_STATUS.NO_STATE;
initialize(propsFromConf);
String partitionValidationRegex =
hiveConf.get(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.name());
if (partitionValidationRegex != null && !partitionValidationRegex.equals("")) {
partitionValidationPattern = Pattern.compile(partitionValidationRegex);
} else {
partitionValidationPattern = null;
}
if (!isInitialized) {
throw new RuntimeException(
"Unable to create persistence manager. Check dss.log for details");
} else {
LOG.info("Initialized ObjectStore");
}
} finally {
pmfPropLock.unlock();
}
}
private ClassLoader classLoader;
{
classLoader = Thread.currentThread().getContextClassLoader();
if (classLoader == null) {
classLoader = ObjectStore.class.getClassLoader();
}
}
@SuppressWarnings("nls")
private void initialize(Properties dsProps) {
LOG.info("ObjectStore, initialize called");
prop = dsProps;
pm = getPersistenceManager();
isInitialized = pm != null;
if (isInitialized) {
expressionProxy = createExpressionProxy(hiveConf);
directSql = new MetaStoreDirectSql(pm, hiveConf);
}
LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm +
" created in the thread with id: " + Thread.currentThread().getId());
}
/**
* Creates the proxy used to evaluate expressions. This is here to prevent circular
* dependency - ql -> metastore client <-> metastore server -> ql. If server and
* client are split, this can be removed.
* @param conf Configuration.
* @return The partition expression proxy.
*/
private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS);
try {
@SuppressWarnings("unchecked")
Class<? extends PartitionExpressionProxy> clazz =
(Class<? extends PartitionExpressionProxy>) MetaStoreUtils.getClass(className);
return MetaStoreUtils.newInstance(
clazz, new Class<?>[0], new Object[0]);
} catch (MetaException e) {
LOG.error("Error loading PartitionExpressionProxy", e);
throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
}
}
/**
* Properties specified in hive-default.xml override the properties specified
* in jpox.properties.
*/
@SuppressWarnings("nls")
private static Properties getDataSourceProps(Configuration conf) {
Properties prop = new Properties();
Iterator<Map.Entry<String, String>> iter = conf.iterator();
while (iter.hasNext()) {
Map.Entry<String, String> e = iter.next();
if (e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) {
Object prevVal = prop.setProperty(e.getKey(), conf.get(e.getKey()));
if (LOG.isDebugEnabled()
&& !e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) {
LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+ " from jpox.properties with " + e.getValue());
}
}
}
// Password may no longer be in the conf, use getPassword()
try {
String passwd =
ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
if (passwd != null && !passwd.isEmpty()) {
prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd);
}
} catch (IOException err) {
throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
}
if (LOG.isDebugEnabled()) {
for (Entry<Object, Object> e : prop.entrySet()) {
if (!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) {
LOG.debug(e.getKey() + " = " + e.getValue());
}
}
}
return prop;
}
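// For example, Configuration entries whose keys contain "datanucleus" or "jdo",
// such as "javax.jdo.option.ConnectionURL" or "datanucleus.autoCreateSchema",
// end up in the returned Properties, while unrelated Hadoop/Hive keys are skipped;
// the metastore password is obtained separately through getPassword() above.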
private static synchronized PersistenceManagerFactory getPMF() {
if (pmf == null) {
pmf = JDOHelper.getPersistenceManagerFactory(prop);
DataStoreCache dsc = pmf.getDataStoreCache();
if (dsc != null) {
HiveConf conf = new HiveConf(ObjectStore.class);
String objTypes = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES);
LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"" + objTypes + "\"");
if (objTypes != null && objTypes.length() > 0) {
objTypes = objTypes.toLowerCase();
String[] typeTokens = objTypes.split(",");
for (String type : typeTokens) {
type = type.trim();
if (PINCLASSMAP.containsKey(type)) {
dsc.pinAll(true, PINCLASSMAP.get(type));
}
else {
LOG.warn(type + " is not one of the pinnable object types: " + com.facebook.presto.hive.$internal.org.apache.commons.lang.StringUtils.join(PINCLASSMAP.keySet(), " "));
}
}
}
} else {
LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes");
}
}
return pmf;
}
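// For instance, with hive.metastore.cache.pinobjtypes set to something like
// "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
// each comma-separated token is lower-cased, looked up in PINCLASSMAP, and the
// matching model class is pinned in the DataNucleus level-2 cache via dsc.pinAll().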
@InterfaceAudience.LimitedPrivate({"HCATALOG"})
@InterfaceStability.Evolving
public PersistenceManager getPersistenceManager() {
return getPMF().getPersistenceManager();
}
@Override
public void shutdown() {
if (pm != null) {
LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm +
" will be shutdown");
pm.close();
}
}
/**
* Opens a new transaction, or reuses the one already created. Every call of this
* function must have a corresponding commit or rollback call.
*
* @return an active transaction
*/
@Override
public boolean openTransaction() {
openTrasactionCalls++;
if (openTrasactionCalls == 1) {
currentTransaction = pm.currentTransaction();
currentTransaction.begin();
transactionStatus = TXN_STATUS.OPEN;
} else {
// openTransactionCalls > 1 means this is an interior transaction
// We should already have a transaction created that is active.
if ((currentTransaction == null) || (!currentTransaction.isActive())){
throw new RuntimeException("openTransaction called in an interior"
+ " transaction scope, but currentTransaction is not active.");
}
}
boolean result = currentTransaction.isActive();
debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
return result;
}
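// The nesting counter above is what lets the data-access methods below wrap their
// JDO work in open/commit calls without worrying about the caller's transaction.
// The idiom used throughout this class is roughly:
//
//   boolean committed = false;
//   try {
//     openTransaction();
//     // ... JDO reads/writes against pm ...
//     committed = commitTransaction();
//   } finally {
//     if (!committed) {
//       rollbackTransaction();
//     }
//   }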
/**
* If this is the commit of the first open call, then an actual commit is
* called.
*
* @return Always returns true
*/
@Override
@SuppressWarnings("nls")
public boolean commitTransaction() {
if (TXN_STATUS.ROLLBACK == transactionStatus) {
debugLog("Commit transaction: rollback");
return false;
}
if (openTrasactionCalls <= 0) {
RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+ openTrasactionCalls + ". This probably indicates that there are unbalanced " +
"calls to openTransaction/commitTransaction");
LOG.error(e);
throw e;
}
if (!currentTransaction.isActive()) {
RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+ openTrasactionCalls + ". This probably indicates that there are unbalanced " +
"calls to openTransaction/commitTransaction");
LOG.error(e);
throw e;
}
openTrasactionCalls--;
debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
transactionStatus = TXN_STATUS.COMMITED;
currentTransaction.commit();
}
return true;
}
/**
* @return true if there is an active transaction. If the current transaction
* is either committed or rolled back it returns false
*/
public boolean isActiveTransaction() {
if (currentTransaction == null) {
return false;
}
return currentTransaction.isActive();
}
/**
* Rolls back the current transaction if it is active
*/
@Override
public void rollbackTransaction() {
if (openTrasactionCalls < 1) {
debugLog("rolling back transaction: no open transactions: " + openTrasactionCalls);
return;
}
debugLog("Rollback transaction, isActive: " + currentTransaction.isActive());
try {
if (currentTransaction.isActive()
&& transactionStatus != TXN_STATUS.ROLLBACK) {
currentTransaction.rollback();
}
} finally {
openTrasactionCalls = 0;
transactionStatus = TXN_STATUS.ROLLBACK;
// remove all detached objects from the cache, since the transaction is
// being rolled back they are no longer relevant, and this prevents them
// from reattaching in future transactions
pm.evictAll();
}
}
@Override
public void createDatabase(Database db) throws InvalidObjectException, MetaException {
boolean commited = false;
MDatabase mdb = new MDatabase();
mdb.setName(db.getName().toLowerCase());
mdb.setLocationUri(db.getLocationUri());
mdb.setDescription(db.getDescription());
mdb.setParameters(db.getParameters());
mdb.setOwnerName(db.getOwnerName());
PrincipalType ownerType = db.getOwnerType();
mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() : ownerType.name()));
try {
openTransaction();
pm.makePersistent(mdb);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
}
@SuppressWarnings("nls")
private MDatabase getMDatabase(String name) throws NoSuchObjectException {
MDatabase mdb = null;
boolean commited = false;
try {
openTransaction();
name = HiveStringUtils.normalizeIdentifier(name);
Query query = pm.newQuery(MDatabase.class, "name == dbname");
query.declareParameters("java.lang.String dbname");
query.setUnique(true);
mdb = (MDatabase) query.execute(name);
pm.retrieve(mdb);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
if (mdb == null) {
throw new NoSuchObjectException("There is no database named " + name);
}
return mdb;
}
@Override
public Database getDatabase(String name) throws NoSuchObjectException {
MetaException ex = null;
Database db = null;
try {
db = getDatabaseInternal(name);
} catch (MetaException e) {
// Signature restriction to NSOE, and NSOE being a flat exception prevents us from
// setting the cause of the NSOE as the MetaException. We should not lose the info
// we got here, but it's very likely that the MetaException is irrelevant and is
// actually an NSOE message, so we should log it and throw an NSOE with the msg.
ex = e;
}
if (db == null) {
LOG.warn("Failed to get database " + name +", returning NoSuchObjectException", ex);
throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
}
return db;
}
public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException {
return new GetDbHelper(name, null, true, true) {
@Override
protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
return directSql.getDatabase(dbName);
}
@Override
protected Database getJdoResult(GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
return getJDODatabase(dbName);
}
}.run(false);
}
public Database getJDODatabase(String name) throws NoSuchObjectException {
MDatabase mdb = null;
boolean commited = false;
try {
openTransaction();
mdb = getMDatabase(name);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
Database db = new Database();
db.setName(mdb.getName());
db.setDescription(mdb.getDescription());
db.setLocationUri(mdb.getLocationUri());
db.setParameters(convertMap(mdb.getParameters()));
db.setOwnerName(mdb.getOwnerName());
String type = mdb.getOwnerType();
db.setOwnerType((null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type));
return db;
}
/**
* Alter the database object in metastore. Currently only the parameters
* of the database or the owner can be changed.
* @param dbName the database name
* @param db the Hive Database object
* @throws MetaException
* @throws NoSuchObjectException
*/
@Override
public boolean alterDatabase(String dbName, Database db)
throws MetaException, NoSuchObjectException {
MDatabase mdb = null;
boolean committed = false;
try {
mdb = getMDatabase(dbName);
mdb.setParameters(db.getParameters());
mdb.setOwnerName(db.getOwnerName());
if (db.getOwnerType() != null) {
mdb.setOwnerType(db.getOwnerType().name());
}
openTransaction();
pm.makePersistent(mdb);
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
return false;
}
}
return true;
}
@Override
public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
boolean success = false;
LOG.info("Dropping database " + dbname + " along with all tables");
dbname = HiveStringUtils.normalizeIdentifier(dbname);
try {
openTransaction();
// then drop the database
MDatabase db = getMDatabase(dbname);
pm.retrieve(db);
if (db != null) {
List<MDBPrivilege> dbGrants = this.listDatabaseGrants(dbname);
if (dbGrants != null && dbGrants.size() > 0) {
pm.deletePersistentAll(dbGrants);
}
pm.deletePersistent(db);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public List<String> getDatabases(String pattern) throws MetaException {
boolean commited = false;
List<String> databases = null;
try {
openTransaction();
// Take the pattern and split it on the | to get all the composing
// patterns
String[] subpatterns = pattern.trim().split("\\|");
String query = "select name from org.apache.hadoop.hive.metastore.model.MDatabase where (";
boolean first = true;
for (String subpattern : subpatterns) {
subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
if (!first) {
query = query + " || ";
}
query = query + " name.matches(\"" + subpattern + "\")";
first = false;
}
query = query + ")";
Query q = pm.newQuery(query);
q.setResult("name");
q.setOrdering("name ascending");
Collection names = (Collection) q.execute();
databases = new ArrayList<String>();
for (Iterator i = names.iterator(); i.hasNext();) {
databases.add((String) i.next());
}
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return databases;
}
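// As an illustration, a pattern of "default|test*" produces (reformatted here) the JDOQL
//   select name from org.apache.hadoop.hive.metastore.model.MDatabase
//   where ( name.matches("(?i)default") ||  name.matches("(?i)test.*"))
// i.e. each '|'-separated subpattern becomes a case-insensitive matches() clause
// with '*' rewritten to '.*'.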
@Override
public List<String> getAllDatabases() throws MetaException {
return getDatabases(".*");
}
private MType getMType(Type type) {
List<MFieldSchema> fields = new ArrayList<MFieldSchema>();
if (type.getFields() != null) {
for (FieldSchema field : type.getFields()) {
fields.add(new MFieldSchema(field.getName(), field.getType(), field
.getComment()));
}
}
return new MType(type.getName(), type.getType1(), type.getType2(), fields);
}
private Type getType(MType mtype) {
List<FieldSchema> fields = new ArrayList<FieldSchema>();
if (mtype.getFields() != null) {
for (MFieldSchema field : mtype.getFields()) {
fields.add(new FieldSchema(field.getName(), field.getType(), field
.getComment()));
}
}
Type ret = new Type();
ret.setName(mtype.getName());
ret.setType1(mtype.getType1());
ret.setType2(mtype.getType2());
ret.setFields(fields);
return ret;
}
@Override
public boolean createType(Type type) {
boolean success = false;
MType mtype = getMType(type);
boolean commited = false;
try {
openTransaction();
pm.makePersistent(mtype);
commited = commitTransaction();
success = true;
} finally {
if (!commited) {
rollbackTransaction();
}
}
return success;
}
@Override
public Type getType(String typeName) {
Type type = null;
boolean commited = false;
try {
openTransaction();
Query query = pm.newQuery(MType.class, "name == typeName");
query.declareParameters("java.lang.String typeName");
query.setUnique(true);
MType mtype = (MType) query.execute(typeName.trim());
pm.retrieve(mtype);
if (mtype != null) {
type = getType(mtype);
}
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return type;
}
@Override
public boolean dropType(String typeName) {
boolean success = false;
try {
openTransaction();
Query query = pm.newQuery(MType.class, "name == typeName");
query.declareParameters("java.lang.String typeName");
query.setUnique(true);
MType type = (MType) query.execute(typeName.trim());
pm.retrieve(type);
if (type != null) {
pm.deletePersistent(type);
}
success = commitTransaction();
} catch (JDOObjectNotFoundException e) {
success = commitTransaction();
LOG.debug("type not found " + typeName, e);
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public void createTable(Table tbl) throws InvalidObjectException, MetaException {
boolean commited = false;
try {
openTransaction();
MTable mtbl = convertToMTable(tbl);
pm.makePersistent(mtbl);
PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges();
List<Object> toPersistPrivObjs = new ArrayList<Object>();
if (principalPrivs != null) {
int now = (int)(System.currentTimeMillis()/1000);
Map<String, List<PrivilegeGrantInfo>> userPrivs = principalPrivs.getUserPrivileges();
putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER);
Map<String, List<PrivilegeGrantInfo>> groupPrivs = principalPrivs.getGroupPrivileges();
putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP);
Map<String, List<PrivilegeGrantInfo>> rolePrivs = principalPrivs.getRolePrivileges();
putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE);
}
pm.makePersistentAll(toPersistPrivObjs);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
}
/**
* Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all of
* them to the toPersistPrivObjs. These privilege objects will be persisted as
* part of createTable.
*
* @param mtbl
* @param toPersistPrivObjs
* @param now
* @param privMap
* @param type
*/
private void putPersistentPrivObjects(MTable mtbl, List<Object> toPersistPrivObjs,
int now, Map<String, List<PrivilegeGrantInfo>> privMap, PrincipalType type) {
if (privMap != null) {
for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : privMap
.entrySet()) {
String principalName = entry.getKey();
List<PrivilegeGrantInfo> privs = entry.getValue();
for (int i = 0; i < privs.size(); i++) {
PrivilegeGrantInfo priv = privs.get(i);
if (priv == null) {
continue;
}
MTablePrivilege mTblSec = new MTablePrivilege(
principalName, type.toString(), mtbl, priv.getPrivilege(),
now, priv.getGrantor(), priv.getGrantorType().toString(), priv
.isGrantOption());
toPersistPrivObjs.add(mTblSec);
}
}
}
}
@Override
public boolean dropTable(String dbName, String tableName) throws MetaException,
NoSuchObjectException, InvalidObjectException, InvalidInputException {
boolean success = false;
try {
openTransaction();
MTable tbl = getMTable(dbName, tableName);
pm.retrieve(tbl);
if (tbl != null) {
// first remove all the grants
List<MTablePrivilege> tabGrants = listAllTableGrants(dbName, tableName);
if (tabGrants != null && tabGrants.size() > 0) {
pm.deletePersistentAll(tabGrants);
}
List<MTableColumnPrivilege> tblColGrants = listTableAllColumnGrants(dbName,
tableName);
if (tblColGrants != null && tblColGrants.size() > 0) {
pm.deletePersistentAll(tblColGrants);
}
List<MPartitionPrivilege> partGrants = this.listTableAllPartitionGrants(dbName, tableName);
if (partGrants != null && partGrants.size() > 0) {
pm.deletePersistentAll(partGrants);
}
List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(dbName,
tableName);
if (partColGrants != null && partColGrants.size() > 0) {
pm.deletePersistentAll(partColGrants);
}
// delete column statistics if present
try {
deleteTableColumnStatistics(dbName, tableName, null);
} catch (NoSuchObjectException e) {
LOG.info("Found no table level column statistics associated with db " + dbName +
" table " + tableName + " record to delete");
}
preDropStorageDescriptor(tbl.getSd());
// then remove the table
pm.deletePersistentAll(tbl);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public Table getTable(String dbName, String tableName) throws MetaException {
boolean commited = false;
Table tbl = null;
try {
openTransaction();
tbl = convertToTable(getMTable(dbName, tableName));
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return tbl;
}
@Override
public List<String> getTables(String dbName, String pattern)
throws MetaException {
boolean commited = false;
List<String> tbls = null;
try {
openTransaction();
dbName = HiveStringUtils.normalizeIdentifier(dbName);
// Take the pattern and split it on the | to get all the composing
// patterns
String[] subpatterns = pattern.trim().split("\\|");
String query =
"select tableName from org.apache.hadoop.hive.metastore.model.MTable "
+ "where database.name == dbName && (";
boolean first = true;
for (String subpattern : subpatterns) {
subpattern = "(?i)" + subpattern.replaceAll("\\*", ".*");
if (!first) {
query = query + " || ";
}
query = query + " tableName.matches(\"" + subpattern + "\")";
first = false;
}
query = query + ")";
Query q = pm.newQuery(query);
q.declareParameters("java.lang.String dbName");
q.setResult("tableName");
q.setOrdering("tableName ascending");
Collection names = (Collection) q.execute(dbName);
tbls = new ArrayList<String>();
for (Iterator i = names.iterator(); i.hasNext();) {
tbls.add((String) i.next());
}
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return tbls;
}
@Override
public List<String> getAllTables(String dbName) throws MetaException {
return getTables(dbName, ".*");
}
private MTable getMTable(String db, String table) {
MTable mtbl = null;
boolean commited = false;
try {
openTransaction();
db = HiveStringUtils.normalizeIdentifier(db);
table = HiveStringUtils.normalizeIdentifier(table);
Query query = pm.newQuery(MTable.class, "tableName == table && database.name == db");
query.declareParameters("java.lang.String table, java.lang.String db");
query.setUnique(true);
mtbl = (MTable) query.execute(table, db);
pm.retrieve(mtbl);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return mtbl;
}
@Override
public List<Table> getTableObjectsByName(String db, List<String> tbl_names)
throws MetaException, UnknownDBException {
List<Table> tables = new ArrayList<Table>();
boolean committed = false;
try {
openTransaction();
db = HiveStringUtils.normalizeIdentifier(db);
Query dbExistsQuery = pm.newQuery(MDatabase.class, "name == db");
dbExistsQuery.declareParameters("java.lang.String db");
dbExistsQuery.setUnique(true);
dbExistsQuery.setResult("name");
String dbNameIfExists = (String) dbExistsQuery.execute(db);
if (dbNameIfExists == null || dbNameIfExists.isEmpty()) {
throw new UnknownDBException("Could not find database " + db);
}
List<String> lowered_tbl_names = new ArrayList<String>();
for (String t : tbl_names) {
lowered_tbl_names.add(HiveStringUtils.normalizeIdentifier(t));
}
Query query = pm.newQuery(MTable.class);
query.setFilter("database.name == db && tbl_names.contains(tableName)");
query.declareParameters("java.lang.String db, java.util.Collection tbl_names");
Collection mtables = (Collection) query.execute(db, lowered_tbl_names);
for (Iterator iter = mtables.iterator(); iter.hasNext();) {
tables.add(convertToTable((MTable) iter.next()));
}
committed = commitTransaction();
} finally {
if (!committed) {
rollbackTransaction();
}
}
return tables;
}
/** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */
private <T> List<T> convertList(List<T> dnList) {
return (dnList == null) ? null : Lists.newArrayList(dnList);
}
/** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */
private Map<String, String> convertMap(Map<String, String> dnMap) {
return MetaStoreUtils.trimMapNulls(dnMap,
HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS));
}
private Table convertToTable(MTable mtbl) throws MetaException {
if (mtbl == null) {
return null;
}
String tableType = mtbl.getTableType();
if (tableType == null) {
// for backwards compatibility with old metastore persistence
if (mtbl.getViewOriginalText() != null) {
tableType = TableType.VIRTUAL_VIEW.toString();
} else if ("TRUE".equals(mtbl.getParameters().get("EXTERNAL"))) {
tableType = TableType.EXTERNAL_TABLE.toString();
} else {
tableType = TableType.MANAGED_TABLE.toString();
}
}
return new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
.getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
.getRetention(), convertToStorageDescriptor(mtbl.getSd()),
convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
}
private MTable convertToMTable(Table tbl) throws InvalidObjectException,
MetaException {
if (tbl == null) {
return null;
}
MDatabase mdb = null;
try {
mdb = getMDatabase(tbl.getDbName());
} catch (NoSuchObjectException e) {
LOG.error(StringUtils.stringifyException(e));
throw new InvalidObjectException("Database " + tbl.getDbName()
+ " doesn't exist.");
}
// If the table has property EXTERNAL set, update table type
// accordingly
String tableType = tbl.getTableType();
boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL"));
if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
if (isExternal) {
tableType = TableType.EXTERNAL_TABLE.toString();
}
}
if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
if (!isExternal) {
tableType = TableType.MANAGED_TABLE.toString();
}
}
// A new table is always created with a new column descriptor
return new MTable(HiveStringUtils.normalizeIdentifier(tbl.getTableName()), mdb,
convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl
.getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
tbl.getViewOriginalText(), tbl.getViewExpandedText(),
tableType);
}
private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
List<MFieldSchema> mkeys = null;
if (keys != null) {
mkeys = new ArrayList<MFieldSchema>(keys.size());
for (FieldSchema part : keys) {
mkeys.add(new MFieldSchema(HiveStringUtils.normalizeIdentifier(part.getName()),
part.getType(), part.getComment()));
}
}
return mkeys;
}
private List<FieldSchema> convertToFieldSchemas(List<MFieldSchema> mkeys) {
List<FieldSchema> keys = null;
if (mkeys != null) {
keys = new ArrayList<FieldSchema>(mkeys.size());
for (MFieldSchema part : mkeys) {
keys.add(new FieldSchema(part.getName(), part.getType(), part
.getComment()));
}
}
return keys;
}
private List<MOrder> convertToMOrders(List<Order> keys) {
List<MOrder> mkeys = null;
if (keys != null) {
mkeys = new ArrayList<MOrder>(keys.size());
for (Order part : keys) {
mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder()));
}
}
return mkeys;
}
private List<Order> convertToOrders(List<MOrder> mkeys) {
List<Order> keys = null;
if (mkeys != null) {
keys = new ArrayList<Order>(mkeys.size());
for (MOrder part : mkeys) {
keys.add(new Order(part.getCol(), part.getOrder()));
}
}
return keys;
}
private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
if (ms == null) {
throw new MetaException("Invalid SerDeInfo object");
}
return new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
}
private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
if (ms == null) {
throw new MetaException("Invalid SerDeInfo object");
}
return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms
.getParameters());
}
/**
* Given a list of model field schemas, create a new model column descriptor.
* @param cols the columns the column descriptor contains
* @return a new column descriptor db-backed object
*/
private MColumnDescriptor createNewMColumnDescriptor(List<MFieldSchema> cols) {
if (cols == null) {
return null;
}
return new MColumnDescriptor(cols);
}
// MSD and SD should be the same objects. Not sure how to make them the same right now
// MSerDeInfo & SerDeInfo should be the same as well
private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd,
boolean noFS)
throws MetaException {
if (msd == null) {
return null;
}
List<MFieldSchema> mFieldSchemas = msd.getCD() == null ? null : msd.getCD().getCols();
StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas),
msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
.isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd
.getSerDeInfo()), convertList(msd.getBucketCols()), convertToOrders(msd
.getSortCols()), convertMap(msd.getParameters()));
SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
convertToSkewedValues(msd.getSkewedColValues()),
covertToSkewedMap(msd.getSkewedColValueLocationMaps()));
sd.setSkewedInfo(skewedInfo);
sd.setStoredAsSubDirectories(msd.isStoredAsSubDirectories());
return sd;
}
private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd)
throws MetaException {
return convertToStorageDescriptor(msd, false);
}
/**
* Convert a list of MStringList to a list of lists of strings
*
* @param mLists
* @return
*/
private List<List<String>> convertToSkewedValues(List<MStringList> mLists) {
List<List<String>> lists = null;
if (mLists != null) {
lists = new ArrayList<List<String>>(mLists.size());
for (MStringList element : mLists) {
lists.add(new ArrayList<String>(element.getInternalList()));
}
}
return lists;
}
private List<MStringList> convertToMStringLists(List<List<String>> mLists) {
List<MStringList> lists = null;
if (null != mLists) {
lists = new ArrayList<MStringList>();
for (List<String> mList : mLists) {
lists.add(new MStringList(mList));
}
}
return lists;
}
/**
* Convert a Map with MStringList keys to a Map with List<String> keys
* @param mMap
* @return
*/
private Map<List<String>, String> covertToSkewedMap(Map<MStringList, String> mMap) {
Map<List<String>, String> map = null;
if (mMap != null) {
map = new HashMap<List<String>, String>(mMap.size());
Set<MStringList> keys = mMap.keySet();
for (MStringList key : keys) {
map.put(new ArrayList<String>(key.getInternalList()), mMap.get(key));
}
}
return map;
}
/**
* Convert a Map with List<String> keys to a Map with MStringList keys
* @param mMap
* @return
*/
private Map<MStringList, String> covertToMapMStringList(Map<List<String>, String> mMap) {
Map<MStringList, String> map = null;
if (mMap != null) {
map = new HashMap<MStringList, String>(mMap.size());
Set<List<String>> keys = mMap.keySet();
for (List<String> key : keys) {
map.put(new MStringList(key), mMap.get(key));
}
}
return map;
}
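// Round trip handled by the two map helpers above, using a hypothetical location:
// a skewed-value location map such as {["1", "x"] -> "/warehouse/t/skew1"} is
// persisted as {MStringList(["1", "x"]) -> "/warehouse/t/skew1"} and converted
// back by covertToSkewedMap.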
/**
* Converts a storage descriptor to a db-backed storage descriptor. Creates a
* new db-backed column descriptor object for this SD.
* @param sd the storage descriptor to wrap in a db-backed object
* @return the storage descriptor db-backed object
* @throws MetaException
*/
private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd)
throws MetaException {
if (sd == null) {
return null;
}
MColumnDescriptor mcd = createNewMColumnDescriptor(convertToMFieldSchemas(sd.getCols()));
return convertToMStorageDescriptor(sd, mcd);
}
/**
* Converts a storage descriptor to a db-backed storage descriptor. It points the
* storage descriptor's column descriptor to the one passed as an argument,
* so it does not create a new mcolumn descriptor object.
* @param sd the storage descriptor to wrap in a db-backed object
* @param mcd the db-backed column descriptor
* @return the db-backed storage descriptor object
* @throws MetaException
*/
private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd,
MColumnDescriptor mcd) throws MetaException {
if (sd == null) {
return null;
}
return new MStorageDescriptor(mcd, sd
.getLocation(), sd.getInputFormat(), sd.getOutputFormat(), sd
.isCompressed(), sd.getNumBuckets(), convertToMSerDeInfo(sd
.getSerdeInfo()), sd.getBucketCols(),
convertToMOrders(sd.getSortCols()), sd.getParameters(),
(null == sd.getSkewedInfo()) ? null
: sd.getSkewedInfo().getSkewedColNames(),
convertToMStringLists((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo()
.getSkewedColValues()),
covertToMapMStringList((null == sd.getSkewedInfo()) ? null : sd.getSkewedInfo()
.getSkewedColValueLocationMaps()), sd.isStoredAsSubDirectories());
}
@Override
public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
throws InvalidObjectException, MetaException {
boolean success = false;
openTransaction();
try {
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
MTable table = this.getMTable(dbName, tblName);
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(dbName, tblName);
tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
}
List<Object> toPersist = new ArrayList<Object>();
for (Partition part : parts) {
if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + part);
}
MPartition mpart = convertToMPart(part, true);
toPersist.add(mpart);
int now = (int)(System.currentTimeMillis()/1000);
if (tabGrants != null) {
for (MTablePrivilege tab: tabGrants) {
toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(),
tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
}
}
if (tabColumnGrants != null) {
for (MTableColumnPrivilege col : tabColumnGrants) {
toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(),
col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
}
}
}
if (toPersist.size() > 0) {
pm.makePersistentAll(toPersist);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
private boolean isValidPartition(
Partition part, boolean ifNotExists) throws MetaException {
MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
partitionValidationPattern);
boolean doesExist = doesPartitionExist(
part.getDbName(), part.getTableName(), part.getValues());
if (doesExist && !ifNotExists) {
throw new MetaException("Partition already exists: " + part);
}
return !doesExist;
}
@Override
public boolean addPartitions(String dbName, String tblName,
PartitionSpecProxy partitionSpec, boolean ifNotExists)
throws InvalidObjectException, MetaException {
boolean success = false;
openTransaction();
try {
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
MTable table = this.getMTable(dbName, tblName);
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(dbName, tblName);
tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
}
if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table "
+ dbName + "." + tblName + ": " + partitionSpec);
}
PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
int now = (int)(System.currentTimeMillis()/1000);
while (iterator.hasNext()) {
Partition part = iterator.next();
if (isValidPartition(part, ifNotExists)) {
MPartition mpart = convertToMPart(part, true);
pm.makePersistent(mpart);
if (tabGrants != null) {
for (MTablePrivilege tab : tabGrants) {
pm.makePersistent(new MPartitionPrivilege(tab.getPrincipalName(),
tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
}
}
if (tabColumnGrants != null) {
for (MTableColumnPrivilege col : tabColumnGrants) {
pm.makePersistent(new MPartitionColumnPrivilege(col.getPrincipalName(),
col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(),
now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
}
}
}
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public boolean addPartition(Partition part) throws InvalidObjectException,
MetaException {
boolean success = false;
boolean commited = false;
try {
MTable table = this.getMTable(part.getDbName(), part.getTableName());
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(part
.getDbName(), part.getTableName());
tabColumnGrants = this.listTableAllColumnGrants(
part.getDbName(), part.getTableName());
}
openTransaction();
MPartition mpart = convertToMPart(part, true);
pm.makePersistent(mpart);
int now = (int)(System.currentTimeMillis()/1000);
List<Object> toPersist = new ArrayList<Object>();
if (tabGrants != null) {
for (MTablePrivilege tab: tabGrants) {
MPartitionPrivilege partGrant = new MPartitionPrivilege(tab
.getPrincipalName(), tab.getPrincipalType(),
mpart, tab.getPrivilege(), now, tab.getGrantor(), tab
.getGrantorType(), tab.getGrantOption());
toPersist.add(partGrant);
}
}
if (tabColumnGrants != null) {
for (MTableColumnPrivilege col : tabColumnGrants) {
MPartitionColumnPrivilege partColumn = new MPartitionColumnPrivilege(col
.getPrincipalName(), col.getPrincipalType(), mpart, col
.getColumnName(), col.getPrivilege(), now, col.getGrantor(), col
.getGrantorType(), col.getGrantOption());
toPersist.add(partColumn);
}
if (toPersist.size() > 0) {
pm.makePersistentAll(toPersist);
}
}
commited = commitTransaction();
success = true;
} finally {
if (!commited) {
rollbackTransaction();
}
}
return success;
}
@Override
public Partition getPartition(String dbName, String tableName,
List<String> part_vals) throws NoSuchObjectException, MetaException {
openTransaction();
Partition part = convertToPart(getMPartition(dbName, tableName, part_vals));
commitTransaction();
if(part == null) {
throw new NoSuchObjectException("partition values="
+ part_vals.toString());
}
part.setValues(part_vals);
return part;
}
private MPartition getMPartition(String dbName, String tableName,
List<String> part_vals) throws MetaException {
MPartition mpart = null;
boolean commited = false;
try {
openTransaction();
dbName = HiveStringUtils.normalizeIdentifier(dbName);
tableName = HiveStringUtils.normalizeIdentifier(tableName);
MTable mtbl = getMTable(dbName, tableName);
if (mtbl == null) {
commited = commitTransaction();
return null;
}
// Change the query to use part_vals instead of the name which is
// redundant TODO: callers of this often get part_vals out of name for no reason...
String name = Warehouse.makePartName(convertToFieldSchemas(mtbl
.getPartitionKeys()), part_vals);
Query query = pm.newQuery(MPartition.class,
"table.tableName == t1 && table.database.name == t2 && partitionName == t3");
query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
query.setUnique(true);
mpart = (MPartition) query.execute(tableName, dbName, name);
pm.retrieve(mpart);
commited = commitTransaction();
} finally {
if (!commited) {
rollbackTransaction();
}
}
return mpart;
}
/**
* Convert a Partition object into an MPartition, which is an object backed by the db
* If the Partition's set of columns is the same as the parent table's AND useTableCD
* is true, then this partition's storage descriptor's column descriptor will point
* to the same one as the table's storage descriptor.
* @param part the partition to convert
* @param useTableCD whether to try to use the parent table's column descriptor.
* @return the model partition object
* @throws InvalidObjectException
* @throws MetaException
*/
private MPartition convertToMPart(Partition part, boolean useTableCD)
throws InvalidObjectException, MetaException {
if (part == null) {
return null;
}
MTable mt = getMTable(part.getDbName(), part.getTableName());
if (mt == null) {
throw new InvalidObjectException(
"Partition doesn't have a valid table or database name");
}
// If this partition's set of columns is the same as the parent table's,
// use the parent table's, so we do not create a duplicate column descriptor,
// thereby saving space
MStorageDescriptor msd;
if (useTableCD &&
mt.getSd() != null && mt.getSd().getCD() != null &&
mt.getSd().getCD().getCols() != null &&
part.getSd() != null &&
convertToFieldSchemas(mt.getSd().getCD().getCols()).
equals(part.getSd().getCols())) {
msd = convertToMStorageDescriptor(part.getSd(), mt.getSd().getCD());
} else {
msd = convertToMStorageDescriptor(part.getSd());
}
return new MPartition(Warehouse.makePartName(convertToFieldSchemas(mt
.getPartitionKeys()), part.getValues()), mt, part.getValues(), part
.getCreateTime(), part.getLastAccessTime(),
msd, part.getParameters());
}
private Partition convertToPart(MPartition mpart) throws MetaException {
if (mpart == null) {
return null;
}
return new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase()
.getName(), mpart.getTable().getTableName(), mpart.getCreateTime(),
mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()),
convertMap(mpart.getParameters()));
}
private Partition convertToPart(String dbName, String tblName, MPartition mpart)
throws MetaException {
if (mpart == null) {
return null;
}
return new Partition(convertList(mpart.getValues()), dbName, tblName,
mpart.getCreateTime(), mpart.getLastAccessTime(),
convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters()));
}
@Override
public boolean dropPartition(String dbName, String tableName,
List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
InvalidInputException {
boolean success = false;
try {
openTransaction();
MPartition part = getMPartition(dbName, tableName, part_vals);
dropPartitionCommon(part);
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public void dropPartitions(String dbName, String tblName, List<String> partNames)
throws MetaException, NoSuchObjectException {
if (partNames.isEmpty()) return;
boolean success = false;
openTransaction();
try {
// Delete all things.
dropPartitionGrantsNoTxn(dbName, tblName, partNames);
dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames);
dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames);
// CDs are reused; go through partition SDs, detach all CDs from SDs, then remove unused CDs.
for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(dbName, tblName, partNames)) {
removeUnusedColumnDescriptor(mcd);
}
dropPartitionsNoTxn(dbName, tblName, partNames);
if (!(success = commitTransaction())) {
throw new MetaException("Failed to drop partitions"); // Should not happen?
}
} finally {
if (!success) {
rollbackTransaction();
}
}
}
/**
* Drop an MPartition and cascade deletes (e.g., delete partition privilege grants,
* drop the storage descriptor cleanly, etc.)
* @param part - the MPartition to drop
* @return whether the transaction committed successfully
* @throws InvalidInputException
* @throws InvalidObjectException
* @throws MetaException
* @throws NoSuchObjectException
*/
private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectException, MetaException,
InvalidObjectException, InvalidInputException {
boolean success = false;
try {
openTransaction();
if (part != null) {
List<MFieldSchema> schemas = part.getTable().getPartitionKeys();
List<String> colNames = new ArrayList<String>();
for (MFieldSchema col: schemas) {
colNames.add(col.getName());
}
String partName = FileUtils.makePartName(colNames, part.getValues());
List<MPartitionPrivilege> partGrants = listPartitionGrants(
part.getTable().getDatabase().getName(),
part.getTable().getTableName(),
Lists.newArrayList(partName));
if (partGrants != null && partGrants.size() > 0) {
pm.deletePersistentAll(partGrants);
}
List<MPartitionColumnPrivilege> partColumnGrants = listPartitionAllColumnGrants(
part.getTable().getDatabase().getName(),
part.getTable().getTableName(),
Lists.newArrayList(partName));
if (partColumnGrants != null && partColumnGrants.size() > 0) {
pm.deletePersistentAll(partColumnGrants);
}
String dbName = part.getTable().getDatabase().getName();
String tableName = part.getTable().getTableName();
// delete partition level column stats if it exists
try {
deletePartitionColumnStatistics(dbName, tableName, partName, part.getValues(), null);
} catch (NoSuchObjectException e) {
LOG.info("No column statistics records found to delete");
}
preDropStorageDescriptor(part.getSd());
pm.deletePersistent(part);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return success;
}
@Override
public List<Partition> getPartitions(
String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException {
return getPartitionsInternal(dbName, tableName, maxParts, true, true);
}
protected List<Partition> getPartitionsInternal(
String dbName, String tblName, final int maxParts, boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
return new GetListHelper<Partition>(dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
Integer max = (maxParts < 0) ? null : maxParts;
return directSql.getPartitions(dbName, tblName, max);
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
return convertToParts(listMPartitions(dbName, tblName, maxParts));
}
}.run(false);
}
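// GetListHelper (and GetDbHelper above) encapsulates the two retrieval paths:
// it first attempts the direct-SQL implementation in getSqlResult() when allowSql
// is enabled, and otherwise (or on direct-SQL failure) falls back to the JDO/ORM
// implementation in getJdoResult().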
@Override
public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
short max, String userName, List<String> groupNames)
throws MetaException, NoSuchObjectException, InvalidObjectException {
boolean success = false;
try {
openTransaction();
List<MPartition> mparts = listMPartitions(dbName, tblName, max);
List<Partition> parts = new ArrayList<Partition>(mparts.size());
if (mparts != null && mparts.size()>0) {
for (MPartition mpart : mparts) {
MTable mtbl = mpart.getTable();
Partition part = convertToPart(mpart);
parts.add(part);
if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
.getPartitionKeys()), part.getValues());
PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName,
tblName, partName, userName, groupNames);
part.setPrivileges(partAuth);
}
}
}
success = commitTransaction();
return parts;
} finally {
if (!success) {
rollbackTransaction();
}
}
}
@Override
public Partition getPartitionWithAuth(String dbName, String tblName,
List<String> partVals, String user_name, List<String> group_names)
throws NoSuchObjectException, MetaException, InvalidObjectException {
boolean success = false;
try {
openTransaction();
MPartition mpart = getMPartition(dbName, tblName, partVals);
if (mpart == null) {
commitTransaction();
throw new NoSuchObjectException("partition values="
+ partVals.toString());
}
Partition part = null;
MTable mtbl = mpart.getTable();
part = convertToPart(mpart);
if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
.getPartitionKeys()), partVals);
PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName,
tblName, partName, user_name, group_names);
part.setPrivileges(partAuth);
}
success = commitTransaction();
return part;
} finally {
if (!success) {
rollbackTransaction();
}
}
}
private List<Partition> convertToParts(List<MPartition> mparts) throws MetaException {
return convertToParts(mparts, null);
}
private List<Partition> convertToParts(List<MPartition> src, List<Partition> dest)
throws MetaException {
if (src == null) {
return dest;
}
if (dest == null) {
dest = new ArrayList<Partition>(src.size());
}
for (MPartition mp : src) {
dest.add(convertToPart(mp));
Deadline.checkTimeout();
}
return dest;
}
private List<Partition> convertToParts(String dbName, String tblName, List<MPartition> mparts)
throws MetaException {
List<Partition> parts = new ArrayList<Partition>(mparts.size());
for (MPartition mp : mparts) {
parts.add(convertToPart(dbName, tblName, mp));
Deadline.checkTimeout();
}
return parts;
}
// TODO:pc implement max
@Override
public List listPartitionNames(String dbName, String tableName,
short max) throws MetaException {
List<String> pns = null;
boolean success = false;
try {
openTransaction();
LOG.debug("Executing getPartitionNames");
pns = getPartitionNamesNoTxn(dbName, tableName, max);
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return pns;
}
private List<String> getPartitionNamesNoTxn(String dbName, String tableName, short max) {
List<String> pns = new ArrayList<String>();
dbName = HiveStringUtils.normalizeIdentifier(dbName);
tableName = HiveStringUtils.normalizeIdentifier(tableName);
Query q = pm.newQuery(
"select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+ "where table.database.name == t1 && table.tableName == t2 "
+ "order by partitionName asc");
q.declareParameters("java.lang.String t1, java.lang.String t2");
q.setResult("partitionName");
if(max > 0) {
q.setRange(0, max);
}
Collection names = (Collection) q.execute(dbName, tableName);
for (Iterator i = names.iterator(); i.hasNext();) {
pns.add((String) i.next());
}
return pns;
}
/**
* Retrieves a Collection of partition-related results from the database that match
* the partial specification given for a specific table.
* @param dbName the name of the database
* @param tableName the name of the table
* @param part_vals the partial specification values
* @param max_parts the maximum number of partitions to return
* @param resultsCol the metadata column of the data to return, e.g. partitionName, etc.
* if resultsCol is empty or null, a collection of MPartition objects is returned
* @throws NoSuchObjectException
* @return A Collection of partition-related items from the db that match the partial spec
* for a table. The type of each item in the collection corresponds to the column
* you want results for. E.g., if resultsCol is partitionName, the Collection
* has types of String, and if resultsCol is null, the types are MPartition.
*/
private Collection getPartitionPsQueryResults(String dbName, String tableName,
List<String> part_vals, short max_parts, String resultsCol)
throws MetaException, NoSuchObjectException {
dbName = HiveStringUtils.normalizeIdentifier(dbName);
tableName = HiveStringUtils.normalizeIdentifier(tableName);
Table table = getTable(dbName, tableName);
if (table == null) {
throw new NoSuchObjectException(dbName + "." + tableName + " table not found");
}
List<FieldSchema> partCols = table.getPartitionKeys();
int numPartKeys = partCols.size();
if (part_vals.size() > numPartKeys) {
throw new MetaException("Incorrect number of partition values");
}
partCols = partCols.subList(0, part_vals.size());
//Construct a pattern of the form: partKey=partVal/partKey2=partVal2/...
// where partVal is either the escaped partition value given as input,
// or a regex of the form ".*"
//This works because the "=" and "/" separating key names and partition key/values
// are not escaped.
String partNameMatcher = Warehouse.makePartName(partCols, part_vals, ".*");
// add ".*" to the regex to match anything after the partial spec.
if (part_vals.size() < numPartKeys) {
partNameMatcher += ".*";
}
Query q = pm.newQuery(MPartition.class);
StringBuilder queryFilter = new StringBuilder("table.database.name == dbName");
queryFilter.append(" && table.tableName == tableName");
queryFilter.append(" && partitionName.matches(partialRegex)");
q.setFilter(queryFilter.toString());
q.declareParameters("java.lang.String dbName, " +
"java.lang.String tableName, java.lang.String partialRegex");
if( max_parts >= 0 ) {
//User specified a row limit, set it on the Query
q.setRange(0, max_parts);
}
if (resultsCol != null && !resultsCol.isEmpty()) {
q.setResult(resultsCol);
}
return (Collection) q.execute(dbName, tableName, partNameMatcher);
}
@Override
public List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
List<String> part_vals, short max_parts, String userName, List<String> groupNames)
throws MetaException, InvalidObjectException, NoSuchObjectException {
List<Partition> partitions = new ArrayList<Partition>();
boolean success = false;
try {
openTransaction();
LOG.debug("executing listPartitionNamesPsWithAuth");
Collection parts = getPartitionPsQueryResults(db_name, tbl_name,
part_vals, max_parts, null);
MTable mtbl = getMTable(db_name, tbl_name);
for (Object o : parts) {
Partition part = convertToPart((MPartition) o);
//set auth privileges
if (null != userName && null != groupNames &&
"TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
.getPartitionKeys()), part.getValues());
PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(db_name,
tbl_name, partName, userName, groupNames);
part.setPrivileges(partAuth);
}
partitions.add(part);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return partitions;
}
@Override
public List<String> listPartitionNamesPs(String dbName, String tableName,
List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException {
List<String> partitionNames = new ArrayList<String>();
boolean success = false;
try {
openTransaction();
LOG.debug("Executing listPartitionNamesPs");
Collection names = getPartitionPsQueryResults(dbName, tableName,
part_vals, max_parts, "partitionName");
for (Object o : names) {
partitionNames.add((String) o);
}
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
return partitionNames;
}
// TODO:pc implement max
private List<MPartition> listMPartitions(String dbName, String tableName,
int max) {
boolean success = false;
List<MPartition> mparts = null;
try {
openTransaction();
LOG.debug("Executing listMPartitions");
dbName = HiveStringUtils.normalizeIdentifier(dbName);
tableName = HiveStringUtils.normalizeIdentifier(tableName);
Query query = pm.newQuery(MPartition.class,
"table.tableName == t1 && table.database.name == t2");
query.declareParameters("java.lang.String t1, java.lang.String t2");
query.setOrdering("partitionName ascending");
if(max > 0) {
query.setRange(0, max);
}
mparts = (List<MPartition>) query.execute(tableName, dbName);
LOG.debug("Done executing query for listMPartitions");
pm.retrieveAll(mparts);
success = commitTransaction();
LOG.debug("Done retrieving all objects for listMPartitions " + mparts);
} finally {
if (!success) {
rollbackTransaction();
}
}
return mparts;
}
@Override
public List<Partition> getPartitionsByNames(String dbName, String tblName,
List<String> partNames) throws MetaException, NoSuchObjectException {
return getPartitionsByNamesInternal(dbName, tblName, partNames, true, true);
}
protected List<Partition> getPartitionsByNamesInternal(String dbName, String tblName,
final List<String> partNames, boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
return new GetListHelper<Partition>(dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames);
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
return getPartitionsViaOrmFilter(dbName, tblName, partNames);
}
}.run(false);
}
@Override
public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
return getPartitionsByExprInternal(
dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true);
}
protected boolean getPartitionsByExprInternal(String dbName, String tblName, final byte[] expr,
final String defaultPartitionName, final short maxParts, List<Partition> result,
boolean allowSql, boolean allowJdo) throws TException {
assert result != null;
// We will try pushdown first, so make the filter. This will also validate the expression,
// if serialization fails we will throw incompatible metastore error to the client.
String filter = null;
try {
filter = expressionProxy.convertExprToFilter(expr);
} catch (MetaException ex) {
throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage());
}
// Make a tree out of the filter.
// TODO: this is all pretty ugly. The only reason we need all these transformations
// is to maintain support for simple filters for HCat users that query metastore.
// If forcing everyone to use thick client is out of the question, maybe we could
// parse the filter into standard hive expressions and not all this separate tree
// Filter.g stuff. That way this method and ...ByFilter would just be merged.
final ExpressionTree exprTree = makeExpressionTree(filter);
final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false);
result.addAll(new GetListHelper<Partition>(dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
// If we have some sort of expression tree, try SQL filter pushdown.
List<Partition> result = null;
if (exprTree != null) {
result = directSql.getPartitionsViaSqlFilter(ctx.getTable(), exprTree, null);
}
if (result == null) {
// We couldn't do SQL filter pushdown. Get names via normal means.
List<String> partNames = new LinkedList<String>();
hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
result = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames);
}
return result;
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
// If we have some sort of expression tree, try JDOQL filter pushdown.
List<Partition> result = null;
if (exprTree != null) {
result = getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false);
}
if (result == null) {
// We couldn't do JDOQL filter pushdown. Get names via normal means.
List<String> partNames = new ArrayList<String>();
hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
result = getPartitionsViaOrmFilter(dbName, tblName, partNames);
}
return result;
}
}.run(true));
return hasUnknownPartitions.get();
}
private class LikeChecker extends ExpressionTree.TreeVisitor {
private boolean hasLike;
public boolean hasLike() {
return hasLike;
}
@Override
protected boolean shouldStop() {
return hasLike;
}
@Override
protected void visit(LeafNode node) throws MetaException {
hasLike = hasLike || (node.operator == Operator.LIKE);
}
}
/**
* Makes expression tree out of expr.
* @param filter Filter.
* @return Expression tree. Null if there was an error.
*/
private ExpressionTree makeExpressionTree(String filter) throws MetaException {
// TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g.
if (filter == null || filter.isEmpty()) {
return ExpressionTree.EMPTY_TREE;
}
LOG.debug("Filter specified is " + filter);
ExpressionTree tree = null;
try {
tree = getFilterParser(filter).tree;
} catch (MetaException ex) {
LOG.info("Unable to make the expression tree from expression string ["
+ filter + "]" + ex.getMessage()); // Don't log the stack, this is normal.
}
if (tree == null) {
return null;
}
// We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here.
LikeChecker lc = new LikeChecker();
tree.accept(lc);
return lc.hasLike() ? null : tree;
}
/**
* Gets the partition names from a table, pruned using an expression.
* @param table Table.
* @param expr Expression.
* @param defaultPartName Default partition name from job config, if any.
* @param maxParts Maximum number of partition names to return.
* @param result The resulting names.
* @return Whether the result contains any unknown partitions.
*/
private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
String defaultPartName, short maxParts, List<String> result) throws MetaException {
result.addAll(getPartitionNamesNoTxn(
table.getDbName(), table.getTableName(), maxParts));
List<String> columnNames = new ArrayList<String>();
List<PrimitiveTypeInfo> typeInfos = new ArrayList<PrimitiveTypeInfo>();
for (FieldSchema fs : table.getPartitionKeys()) {
columnNames.add(fs.getName());
typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()));
}
if (defaultPartName == null || defaultPartName.isEmpty()) {
defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
}
return expressionProxy.filterPartitionsByExpr(
columnNames, typeInfos, expr, defaultPartName, result);
}
/**
* Gets partitions from the table via ORM (JDOQL) filter pushdown.
* @param table The table.
* @param tree The expression tree from which JDOQL filter will be made.
* @param maxParts Maximum number of partitions to return.
* @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown by a client
* (old hive client or non-hive one); if it was and we fail to create a filter, we will throw.
* @return Resulting partitions. Can be null if isValidatedFilter is false, and
* there was error deriving the JDO filter.
*/
private List<Partition> getPartitionsViaOrmFilter(Table table, ExpressionTree tree,
short maxParts, boolean isValidatedFilter) throws MetaException {
Map<String, Object> params = new HashMap<String, Object>();
String jdoFilter = makeQueryFilterString(
table.getDbName(), table, tree, params, isValidatedFilter);
if (jdoFilter == null) {
assert !isValidatedFilter;
return null;
}
Query query = pm.newQuery(MPartition.class, jdoFilter);
if (maxParts >= 0) {
// User specified a row limit, set it on the Query
query.setRange(0, maxParts);
}
String parameterDeclaration = makeParameterDeclarationStringObj(params);
query.declareParameters(parameterDeclaration);
query.setOrdering("partitionName ascending");
@SuppressWarnings("unchecked")
List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params);
LOG.debug("Done executing query for getPartitionsViaOrmFilter");
pm.retrieveAll(mparts); // TODO: why is this inconsistent with what we get by names?
LOG.debug("Done retrieving all objects for getPartitionsViaOrmFilter");
List<Partition> results = convertToParts(mparts);
query.closeAll();
return results;
}
private static class Out<T> {
public T val;
}
/**
* Gets partitions from the table via ORM (JDOQL) name filter.
* @param dbName Database name.
* @param tblName Table name.
* @param partNames Partition names to get the objects for.
* @return Resulting partitions.
*/
private List<Partition> getPartitionsViaOrmFilter(
String dbName, String tblName, List<String> partNames) throws MetaException {
if (partNames.isEmpty()) {
return new ArrayList<Partition>();
}
Out<Query> query = new Out<Query>();
List<MPartition> mparts = null;
try {
mparts = getMPartitionsViaOrmFilter(dbName, tblName, partNames, query);
return convertToParts(dbName, tblName, mparts);
} finally {
if (query.val != null) {
query.val.closeAll();
}
}
}
private void dropPartitionsNoTxn(String dbName, String tblName, List<String> partNames) {
ObjectPair<Query, Map<String, String>> queryWithParams =
getPartQueryWithParams(dbName, tblName, partNames);
Query query = queryWithParams.getFirst();
query.setClass(MPartition.class);
long deleted = query.deletePersistentAll(queryWithParams.getSecond());
LOG.debug("Deleted " + deleted + " partition from store");
query.closeAll();
}
/**
* Detaches column descriptors from storage descriptors; returns the set of unique CDs
* thus detached. This is done before dropping partitions because CDs are reused between
* SDs; so, we remove the links to delete SDs and then check the returned CDs to see if
* they are referenced by other SDs.
*/
private HashSet<MColumnDescriptor> detachCdsFromSdsNoTxn(
String dbName, String tblName, List<String> partNames) {
ObjectPair<Query, Map<String, String>> queryWithParams =
getPartQueryWithParams(dbName, tblName, partNames);
Query query = queryWithParams.getFirst();
query.setClass(MPartition.class);
query.setResult("sd");
@SuppressWarnings("unchecked")
List<MStorageDescriptor> sds = (List<MStorageDescriptor>) query.executeWithMap(
queryWithParams.getSecond());
HashSet<MColumnDescriptor> candidateCds = new HashSet<MColumnDescriptor>();
for (MStorageDescriptor sd : sds) {
if (sd != null && sd.getCD() != null) {
candidateCds.add(sd.getCD());
sd.setCD(null);
}
}
return candidateCds;
}
private List<MPartition> getMPartitionsViaOrmFilter(String dbName,
String tblName, List<String> partNames, Out<Query> out) {
ObjectPair<Query, Map<String, String>> queryWithParams =
getPartQueryWithParams(dbName, tblName, partNames);
Query query = out.val = queryWithParams.getFirst();
query.setResultClass(MPartition.class);
query.setClass(MPartition.class);
query.setOrdering("partitionName ascending");
@SuppressWarnings("unchecked")
List<MPartition> result = (List<MPartition>) query.executeWithMap(queryWithParams.getSecond());
return result;
}
private ObjectPair<Query, Map<String, String>> getPartQueryWithParams(
String dbName, String tblName, List<String> partNames) {
StringBuilder sb = new StringBuilder(
"table.tableName == t1 && table.database.name == t2 && (");
int n = 0;
Map<String, String> params = new HashMap<String, String>();
for (Iterator<String> itr = partNames.iterator(); itr.hasNext();) {
String pn = "p" + n;
n++;
String part = itr.next();
params.put(pn, part);
sb.append("partitionName == ").append(pn);
sb.append(" || ");
}
sb.setLength(sb.length() - 4); // remove the last " || "
sb.append(')');
Query query = pm.newQuery();
query.setFilter(sb.toString());
LOG.debug(" JDOQL filter is " + sb.toString());
params.put("t1", HiveStringUtils.normalizeIdentifier(tblName));
params.put("t2", HiveStringUtils.normalizeIdentifier(dbName));
query.declareParameters(makeParameterDeclarationString(params));
return new ObjectPair<Query, Map<String, String>>(query, params);
}
@Override
public List<Partition> getPartitionsByFilter(String dbName, String tblName,
String filter, short maxParts) throws MetaException, NoSuchObjectException {
return getPartitionsByFilterInternal(dbName, tblName, filter, maxParts, true, true);
}
/** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. */
private abstract class GetHelper<T> {
private final boolean isInTxn, doTrace, allowJdo;
private boolean doUseDirectSql;
private long start;
private Table table;
protected final String dbName, tblName;
private boolean success = false;
protected T results = null;
public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJdo)
throws MetaException {
assert allowSql || allowJdo;
this.allowJdo = allowJdo;
this.dbName = HiveStringUtils.normalizeIdentifier(dbName);
if (tblName != null){
this.tblName = HiveStringUtils.normalizeIdentifier(tblName);
} else {
// tblName can be null in cases of Helper being used at a higher
// abstraction level, such as with databases
this.tblName = null;
this.table = null;
}
this.doTrace = LOG.isDebugEnabled();
this.isInTxn = isActiveTransaction();
// SQL usage inside a larger transaction (e.g. droptable) may not be desirable because
// some databases (e.g. Postgres) abort the entire transaction when any query fails, so
// the fallback from failed SQL to JDO is not possible.
boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)
&& (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn);
if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) {
throw new MetaException("SQL is not operational"); // test path; SQL is enabled and broken.
}
this.doUseDirectSql = allowSql && isConfigEnabled && directSql.isCompatibleDatastore();
}
protected abstract String describeResult();
protected abstract T getSqlResult(GetHelper<T> ctx) throws MetaException;
protected abstract T getJdoResult(
GetHelper<T> ctx) throws MetaException, NoSuchObjectException;
public T run(boolean initTable) throws MetaException, NoSuchObjectException {
try {
start(initTable);
if (doUseDirectSql) {
try {
setResult(getSqlResult(this));
} catch (Exception ex) {
handleDirectSqlError(ex);
}
}
if (!doUseDirectSql) {
setResult(getJdoResult(this));
}
return commit();
} catch (NoSuchObjectException ex) {
throw ex;
} catch (MetaException ex) {
throw ex;
} catch (Exception ex) {
LOG.error("", ex);
throw new MetaException(ex.getMessage());
} finally {
close();
}
}
private void start(boolean initTable) throws MetaException, NoSuchObjectException {
start = doTrace ? System.nanoTime() : 0;
openTransaction();
if (initTable && (tblName != null)) {
table = ensureGetTable(dbName, tblName);
}
}
private boolean setResult(T results) {
this.results = results;
return this.results != null;
}
private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException {
LOG.warn("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex);
if (!allowJdo) {
if (ex instanceof MetaException) {
throw (MetaException)ex;
}
throw new MetaException(ex.getMessage());
}
if (!isInTxn) {
rollbackTransaction();
start = doTrace ? System.nanoTime() : 0;
openTransaction();
if (table != null) {
table = ensureGetTable(dbName, tblName);
}
} else {
start = doTrace ? System.nanoTime() : 0;
}
doUseDirectSql = false;
}
public void disableDirectSql() {
this.doUseDirectSql = false;
}
private T commit() {
success = commitTransaction();
if (doTrace) {
LOG.debug(describeResult() + " retrieved using " + (doUseDirectSql ? "SQL" : "ORM")
+ " in " + ((System.nanoTime() - start) / 1000000.0) + "ms");
}
return results;
}
private void close() {
if (!success) {
rollbackTransaction();
}
}
public Table getTable() {
return table;
}
}
private abstract class GetListHelper<T> extends GetHelper<List<T>> {
public GetListHelper(
String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException {
super(dbName, tblName, allowSql, allowJdo);
}
@Override
protected String describeResult() {
return results.size() + " entries";
}
}
private abstract class GetDbHelper extends GetHelper<Database> {
/**
* GetHelper for returning db info using directSql/JDO.
* Since this is a db-level call, tblName is ignored, and null is passed irrespective of what is passed in.
* @param dbName The Database Name
* @param tblName Placeholder param to match signature, always ignored.
* @param allowSql Whether or not we allow DirectSQL to perform this query.
* @param allowJdo Whether or not we allow ORM to perform this query.
* @throws MetaException
*/
public GetDbHelper(
String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException {
super(dbName,null,allowSql,allowJdo);
}
@Override
protected String describeResult() {
return "db details for db " + dbName;
}
}
private abstract class GetStatHelper extends GetHelper<ColumnStatistics> {
public GetStatHelper(
String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException {
super(dbName, tblName, allowSql, allowJdo);
}
@Override
protected String describeResult() {
return "statistics for " + (results == null ? 0 : results.getStatsObjSize()) + " columns";
}
}
protected List<Partition> getPartitionsByFilterInternal(String dbName, String tblName,
String filter, final short maxParts, boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
final ExpressionTree tree = (filter != null && !filter.isEmpty())
? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
return new GetListHelper<Partition>(dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
List<Partition> parts = directSql.getPartitionsViaSqlFilter(
ctx.getTable(), tree, (maxParts < 0) ? null : (int)maxParts);
if (parts == null) {
// Cannot push down SQL filter. The message has been logged internally.
// This is not an error so don't roll back, just go to JDO.
ctx.disableDirectSql();
}
return parts;
}
@Override
protected List<Partition> getJdoResult(
GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
return getPartitionsViaOrmFilter(ctx.getTable(), tree, maxParts, true);
}
}.run(true);
}
/**
* Gets the table object for a given table, throws if anything goes wrong.
* @param dbName Database name.
* @param tblName Table name.
* @return Table object.
*/
private MTable ensureGetMTable(
String dbName, String tblName) throws NoSuchObjectException, MetaException {
MTable mtable = getMTable(dbName, tblName);
if (mtable == null) {
throw new NoSuchObjectException("Specified database/table does not exist : "
+ dbName + "." + tblName);
}
return mtable;
}
private Table ensureGetTable(
String dbName, String tblName) throws NoSuchObjectException, MetaException {
return convertToTable(ensureGetMTable(dbName, tblName));
}
private FilterParser getFilterParser(String filter) throws MetaException {
FilterLexer lexer = new FilterLexer(new ANTLRNoCaseStringStream(filter));
CommonTokenStream tokens = new CommonTokenStream(lexer);
FilterParser parser = new FilterParser(tokens);
try {
parser.filter();
} catch(RecognitionException re) {
throw new MetaException("Error parsing partition filter; lexer error: "
+ lexer.errorMsg + "; exception " + re);
}
if (lexer.errorMsg != null) {
throw new MetaException("Error parsing partition filter : " + lexer.errorMsg);
}
return parser;
}
/**
* Makes a JDO query filter string for tables or partitions.
* @param dbName Database name.
* @param mtable Table. If null, the query returned is over tables in a database.
* If not null, the query returned is over partitions in a table.
* @param filter The filter from which JDOQL filter will be made.
* @param params Parameters for the filter. Some parameters may be added here.
* @return Resulting filter.
*/
private String makeQueryFilterString(String dbName, MTable mtable, String filter,
Map<String, Object> params) throws MetaException {
ExpressionTree tree = (filter != null && !filter.isEmpty())
? getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
return makeQueryFilterString(dbName, convertToTable(mtable), tree, params, true);
}
/**
* Makes a JDO query filter string for tables or partitions.
* @param dbName Database name.
* @param table Table. If null, the query returned is over tables in a database.
* If not null, the query returned is over partitions in a table.
* @param tree The expression tree from which JDOQL filter will be made.
* @param params Parameters for the filter. Some parameters may be added here.
* @param isValidatedFilter Whether the filter was pre-validated for JDOQL pushdown
* by the client; if it was and we fail to create a filter, we will throw.
* @return Resulting filter. Can be null if isValidatedFilter is false, and there was error.
*/
private String makeQueryFilterString(String dbName, Table table, ExpressionTree tree,
Map<String, Object> params, boolean isValidatedFilter) throws MetaException {
assert tree != null;
FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter);
if (table != null) {
queryBuilder.append("table.tableName == t1 && table.database.name == t2");
params.put("t1", table.getTableName());
params.put("t2", table.getDbName());
} else {
queryBuilder.append("database.name == dbName");
params.put("dbName", dbName);
}
tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder);
if (queryBuilder.hasError()) {
assert !isValidatedFilter;
LOG.info("JDO filter pushdown cannot be used: " + queryBuilder.getErrorMessage());
return null;
}
String jdoFilter = queryBuilder.getFilter();
LOG.debug("jdoFilter = " + jdoFilter);
return jdoFilter;
}
private String makeParameterDeclarationString(Map<String, String> params) {
//Create the parameter declaration string
StringBuilder paramDecl = new StringBuilder();
for (String key : params.keySet()) {
paramDecl.append(", java.lang.String " + key);
}
return paramDecl.toString();
}
private String makeParameterDeclarationStringObj(Map<String, Object> params) {
//Create the parameter declaration string
StringBuilder paramDecl = new StringBuilder();
for (Entry<String, Object> entry : params.entrySet()) {
paramDecl.append(", ");
paramDecl.append(entry.getValue().getClass().getName());
paramDecl.append(" ");
paramDecl.append(entry.getKey());
}
return paramDecl.toString();
}
@Override
public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
throws MetaException {
boolean success = false;
List<String> tableNames = new ArrayList<String>();
try {
openTransaction();
LOG.debug("Executing listTableNamesByFilter");
dbName = HiveStringUtils.normalizeIdentifier(dbName);
Map<String, Object> params = new HashMap<String, Object>();
String queryFilterString = makeQueryFilterString(dbName, null, filter, params);
Query query = pm.newQuery(MTable.class);
query.declareImports("import java.lang.String");
query.setResult("tableName");
query.setResultClass(java.lang.String.class);
if (maxTables >= 0) {
query.setRange(0, maxTables);
}
LOG.debug("filter specified is " + filter + "," + " JDOQL filter is " + queryFilterString);
for (Entry<String, Object> entry : params.entrySet()) {
LOG.debug("key: " + entry.getKey() + " value: " + entry.getValue() +
" class: " + entry.getValue().getClass().getName());
}
String parameterDeclaration = makeParameterDeclarationStringObj(params);
query.declareParameters(parameterDeclaration);
query.setFilter(queryFilterString);
Collection names = (Collection) query.executeWithMap(params);
//have to emulate "distinct", otherwise tables with the same name may be returned
Set<String> tableNamesSet = new HashSet<String>();
for (Iterator i = names.iterator(); i.hasNext();) {
tableNamesSet.add((String) i.next());
}
tableNames = new ArrayList<String>(tableNamesSet);
LOG.debug("Done executing query for listTableNamesByFilter");
success = commitTransaction();
LOG.debug("Done retrieving all objects for listTableNamesByFilter");
} finally {
if (!success) {
rollbackTransaction();
}
}
return tableNames;
}
@Override
public List<String> listPartitionNamesByFilter(String dbName, String tableName,
String filter, short maxParts) throws MetaException {
boolean success = false;
List<String> partNames = new ArrayList<String>();
try {
openTransaction();
LOG.debug("Executing listMPartitionNamesByFilter");
dbName = HiveStringUtils.normalizeIdentifier(dbName);
tableName = HiveStringUtils.normalizeIdentifier(tableName);
MTable mtable = getMTable(dbName, tableName);
if( mtable == null ) {
// To be consistent with the behavior of listPartitionNames, if the
// table or db does not exist, we return an empty list
return partNames;
}
Map<String, Object> params = new HashMap<String, Object>();
String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params);
Query query = pm.newQuery(
"select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
+ "where " + queryFilterString);
if( maxParts >= 0 ) {
//User specified a row limit, set it on the Query
query.setRange(0, maxParts);
}
LOG.debug("Filter specified is " + filter + "," +
" JDOQL filter is " + queryFilterString);
LOG.debug("Params is " + params);
String parameterDeclaration = makeParameterDeclarationStringObj(params);
query.declareParameters(parameterDeclaration);
query.setOrdering("partitionName ascending");
query.setResult("partitionName");
Collection names = (Collection) query.executeWithMap(params);
partNames = new ArrayList<String>();
for (Iterator i = names.iterator(); i.hasNext();) {
partNames.add((String) i.next());
}
LOG.debug("Done executing query for listMPartitionNamesByFilter");
success = commitTransaction();
LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter");
} finally {
if (!success) {
rollbackTransaction();
}
}
return partNames;
}
@Override
public void alterTable(String dbname, String name, Table newTable)
throws InvalidObjectException, MetaException {
boolean success = false;
try {
openTransaction();
name = HiveStringUtils.normalizeIdentifier(name);
dbname = HiveStringUtils.normalizeIdentifier(dbname);
MTable newt = convertToMTable(newTable);
if (newt == null) {
throw new InvalidObjectException("new table is invalid");
}
MTable oldt = getMTable(dbname, name);
if (oldt == null) {
throw new MetaException("table " + name + " doesn't exist");
}
// For now, only altering name, owner, parameters, cols and bucketcols is allowed
oldt.setDatabase(newt.getDatabase());
oldt.setTableName(HiveStringUtils.normalizeIdentifier(newt.getTableName()));
oldt.setParameters(newt.getParameters());
oldt.setOwner(newt.getOwner());
// Fully copy over the contents of the new SD into the old SD,
// so we don't create an extra SD in the metastore db that has no references.
copyMSD(newt.getSd(), oldt.getSd());
oldt.setRetention(newt.getRetention());
oldt.setPartitionKeys(newt.getPartitionKeys());
oldt.setTableType(newt.getTableType());
oldt.setLastAccessTime(newt.getLastAccessTime());
oldt.setViewOriginalText(newt.getViewOriginalText());
oldt.setViewExpandedText(newt.getViewExpandedText());
// commit the changes
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
}
@Override
public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
throws InvalidObjectException, MetaException {
boolean success = false;
try {
openTransaction();
name = HiveStringUtils.normalizeIdentifier(name);
baseTblName = HiveStringUtils.normalizeIdentifier(baseTblName);
dbname = HiveStringUtils.normalizeIdentifier(dbname);
MIndex newi = convertToMIndex(newIndex);
if (newi == null) {
throw new InvalidObjectException("new index is invalid");
}
MIndex oldi = getMIndex(dbname, baseTblName, name);
if (oldi == null) {
throw new MetaException("index " + name + " doesn't exist");
}
// For now, only altering parameters is allowed
oldi.setParameters(newi.getParameters());
// commit the changes
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
}
private void alterPartitionNoTxn(String dbname, String name, List<String> part_vals,
Partition newPart) throws InvalidObjectException, MetaException {
name = HiveStringUtils.normalizeIdentifier(name);
dbname = HiveStringUtils.normalizeIdentifier(dbname);
MPartition oldp = getMPartition(dbname, name, part_vals);
MPartition newp = convertToMPart(newPart, false);
if (oldp == null || newp == null) {
throw new InvalidObjectException("partition does not exist.");
}
oldp.setValues(newp.getValues());
oldp.setPartitionName(newp.getPartitionName());
oldp.setParameters(newPart.getParameters());
if (!TableType.VIRTUAL_VIEW.name().equals(oldp.getTable().getTableType())) {
copyMSD(newp.getSd(), oldp.getSd());
}
if (newp.getCreateTime() != oldp.getCreateTime()) {
oldp.setCreateTime(newp.getCreateTime());
}
if (newp.getLastAccessTime() != oldp.getLastAccessTime()) {
oldp.setLastAccessTime(newp.getLastAccessTime());
}
}
@Override
public void alterPartition(String dbname, String name, List<String> part_vals, Partition newPart)
throws InvalidObjectException, MetaException {
boolean success = false;
Exception e = null;
try {
openTransaction();
alterPartitionNoTxn(dbname, name, part_vals, newPart);
// commit the changes
success = commitTransaction();
} catch (Exception exception) {
e = exception;
} finally {
if (!success) {
rollbackTransaction();
MetaException metaException = new MetaException(
"The transaction for alter partition did not commit successfully.");
if (e != null) {
metaException.initCause(e);
}
throw metaException;
}
}
}
@Override
public void alterPartitions(String dbname, String name, List<List<String>> part_vals,
List<Partition> newParts) throws InvalidObjectException, MetaException {
boolean success = false;
Exception e = null;
try {
openTransaction();
Iterator<List<String>> part_val_itr = part_vals.iterator();
for (Partition tmpPart : newParts) {
List<String> tmpPartVals = part_val_itr.next();
alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart);
}
// commit the changes
success = commitTransaction();
} catch (Exception exception) {
e = exception;
} finally {
if (!success) {
rollbackTransaction();
MetaException metaException = new MetaException(
"The transaction for alter partition did not commit successfully.");
if (e != null) {
metaException.initCause(e);
}
throw metaException;
}
}
}
private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) {
oldSd.setLocation(newSd.getLocation());
MColumnDescriptor oldCD = oldSd.getCD();
// If the columns of the old column descriptor != the columns of the new one,
// then change the old storage descriptor's column descriptor.
// Convert the MFieldSchema's to their thrift object counterparts, because we maintain
// datastore identity (i.e., identity of the model objects are managed by JDO,
// not the application).
if (!(oldSd != null && oldSd.getCD() != null &&
oldSd.getCD().getCols() != null &&
newSd != null && newSd.getCD() != null &&
newSd.getCD().getCols() != null &&
convertToFieldSchemas(newSd.getCD().getCols()).
equals(convertToFieldSchemas(oldSd.getCD().getCols()))
)) {
oldSd.setCD(newSd.getCD());
}
//If oldCd does not have any more references, then we should delete it
// from the backend db
removeUnusedColumnDescriptor(oldCD);
oldSd.setBucketCols(newSd.getBucketCols());
oldSd.setCompressed(newSd.isCompressed());
oldSd.setInputFormat(newSd.getInputFormat());
oldSd.setOutputFormat(newSd.getOutputFormat());
oldSd.setNumBuckets(newSd.getNumBuckets());
oldSd.getSerDeInfo().setName(newSd.getSerDeInfo().getName());
oldSd.getSerDeInfo().setSerializationLib(
newSd.getSerDeInfo().getSerializationLib());
oldSd.getSerDeInfo().setParameters(newSd.getSerDeInfo().getParameters());
oldSd.setSkewedColNames(newSd.getSkewedColNames());
oldSd.setSkewedColValues(newSd.getSkewedColValues());
oldSd.setSkewedColValueLocationMaps(newSd.getSkewedColValueLocationMaps());
oldSd.setSortCols(newSd.getSortCols());
oldSd.setParameters(newSd.getParameters());
oldSd.setStoredAsSubDirectories(newSd.isStoredAsSubDirectories());
}
/**
* Checks if a column descriptor has any remaining references by storage descriptors
* in the db. If it does not, then delete the CD. If it does, then do nothing.
* @param oldCD the column descriptor to delete if it is no longer referenced anywhere
*/
private void removeUnusedColumnDescriptor(MColumnDescriptor oldCD) {
if (oldCD == null) {
return;
}
boolean success = false;
try {
openTransaction();
LOG.debug("execute removeUnusedColumnDescriptor");
List