/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.metacat.connector.hive;
import com.google.common.base.Functions;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.Sort;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.common.server.partition.parser.PartitionParser;
import com.netflix.metacat.common.server.partition.util.FilterPartition;
import com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval;
import com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval;
import com.netflix.metacat.common.server.util.JdbcUtil;
import com.netflix.metacat.common.server.util.ThreadServiceManager;
import com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter;
import com.netflix.metacat.connector.hive.monitoring.HiveMetrics;
import com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric;
import com.netflix.metacat.connector.hive.util.PartitionDetail;
import com.netflix.metacat.connector.hive.util.PartitionFilterGenerator;
import com.netflix.spectator.api.Registry;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.ResultSetExtractor;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.sql.DataSource;
import java.io.StringReader;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
* HiveConnectorFastPartitionService.
*
* <p>Partition service that reads partition metadata directly from the Hive metastore
* database over JDBC rather than through the Thrift metastore API.
*
* @author zhenl
* @since 1.0.0
*/
@Slf4j
@Transactional(readOnly = true)
public class HiveConnectorFastPartitionService extends HiveConnectorPartitionService {
private static final String FIELD_DATE_CREATED = "dateCreated";
private static final String FIELD_BATCHID = "batchid";
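// Direct SQL against the Hive metastore schema tables (PARTITIONS, TBLS, DBS, SDS, SERDES),
// used in place of the Thrift metastore API for faster partition reads.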
private static final String SQL_GET_PARTITIONS_WITH_KEY_URI =
"select p.PART_NAME as name, p.CREATE_TIME as dateCreated, sds.location uri"
+ " from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID "
+ "join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID";
private static final String SQL_GET_PARTITIONS_WITH_KEY =
"select p.PART_NAME as name from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d on t.DB_ID = d.DB_ID";
private static final String SQL_GET_PARTITIONS =
"select p.part_id as id, p.PART_NAME as name, p.CREATE_TIME as dateCreated,"
+ " sds.location uri, sds.input_format, sds.output_format,"
+ " sds.sd_id, s.serde_id, s.slib from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d"
+ " on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID"
+ " join SERDES s on sds.SERDE_ID=s.SERDE_ID";
private static final String SQL_GET_PARTITION_NAMES_BY_URI =
"select p.part_name partition_name,t.tbl_name table_name,d.name schema_name,"
+ " sds.location from PARTITIONS as p join TBLS as t on t.TBL_ID = p.TBL_ID"
+ " join DBS as d on t.DB_ID = d.DB_ID join SDS as sds on p.SD_ID = sds.SD_ID where";
private static final String SQL_GET_PARTITION_PARAMS =
"select part_id, param_key, param_value from PARTITION_PARAMS where 1=1";
private static final String SQL_GET_SD_PARAMS =
"select sd_id, param_key, param_value from SD_PARAMS where 1=1";
private static final String SQL_GET_SERDE_PARAMS =
"select serde_id, param_key, param_value from SERDE_PARAMS where 1=1";
private static final String SQL_GET_PARTITION_KEYS =
"select pkey_name, pkey_type from PARTITION_KEYS as p "
+ "join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d"
+ " on t.DB_ID = d.DB_ID where d.name=? and t.tbl_name=? order by integer_idx";
private static final String SQL_GET_PARTITION_COUNT =
"select count(*) count from PARTITIONS as p"
+ " join TBLS as t on t.TBL_ID = p.TBL_ID join DBS as d on t.DB_ID = d.DB_ID"
+ " join SDS as sds on p.SD_ID = sds.SD_ID where d.NAME = ? and t.TBL_NAME = ?";
private final ThreadServiceManager threadServiceManager;
private final Registry registry;
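// JDBC access to the Hive metastore database.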
private final JdbcUtil jdbcUtil;
private final HiveConnectorFastServiceMetric fastServiceMetric;
/**
* Constructor.
*
* @param catalogName catalog name
* @param metacatHiveClient hive client
* @param hiveMetacatConverters hive converter
* @param connectorContext server context
* @param threadServiceManager thread service manager
* @param dataSource data source
* @param fastServiceMetric fast service metric
*/
public HiveConnectorFastPartitionService(
final String catalogName,
final IMetacatHiveClient metacatHiveClient,
final HiveConnectorInfoConverter hiveMetacatConverters,
final ConnectorContext connectorContext,
final ThreadServiceManager threadServiceManager,
final DataSource dataSource,
final HiveConnectorFastServiceMetric fastServiceMetric
) {
super(catalogName, metacatHiveClient, hiveMetacatConverters);
this.threadServiceManager = threadServiceManager;
this.registry = connectorContext.getRegistry();
this.jdbcUtil = new JdbcUtil(dataSource);
this.fastServiceMetric = fastServiceMetric;
}
/**
* Number of partitions for the given table.
*
* @param requestContext the request context
* @param tableName table name
* @return Number of partitions
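*
* <p>Illustrative usage sketch; the catalog, database, and table names are assumed:
* <pre>{@code
* final QualifiedName table = QualifiedName.ofTable("hive", "mydb", "mytable");
* final int count = partitionService.getPartitionCount(requestContext, table);
* }</pre>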
*/
@Override
public int getPartitionCount(
final ConnectorRequestContext requestContext,
final QualifiedName tableName
) {
final long start = registry.clock().wallTime();
final Integer result;
// Handler for reading the result set
final ResultSetExtractor<Integer> handler = rs -> {
int count = 0;
while (rs.next()) {
count = rs.getInt("count");
}
return count;
};
try {
result = jdbcUtil.getJdbcTemplate().query(SQL_GET_PARTITION_COUNT, handler,
tableName.getDatabaseName(), tableName.getTableName());
} catch (DataAccessException e) {
throw new ConnectorException("getPartitionCount", e);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionCount.getMetricName(), registry.clock().wallTime() - start);
}
return result;
}
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(
final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest
) {
final long start = registry.clock().wallTime();
try {
return this.getPartitions(
tableName.getDatabaseName(),
tableName.getTableName(),
partitionsRequest.getPartitionNames(),
partitionsRequest.getFilter(),
partitionsRequest.getSort(),
partitionsRequest.getPageable(),
partitionsRequest.getIncludePartitionDetails()
);
} finally {
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitions.getMetricName(), registry.clock().wallTime() - start);
}
}
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
final QualifiedName tableName,
final PartitionListRequest partitionsRequest) {
final long start = registry.clock().wallTime();
final List<String> result;
final List<String> partitionNames = partitionsRequest.getPartitionNames();
final Sort sort = partitionsRequest.getSort();
final Pageable pageable = partitionsRequest.getPageable();
final String filterExpression = partitionsRequest.getFilter();
if (filterExpression != null) {
final FilterPartition filter = new FilterPartition();
// true when the filter expression references the batchid field
final boolean isBatched =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
final boolean hasDateCreated =
!Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
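// Handler that reads each row and keeps only partition names that pass the filter expression.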
final ResultSetExtractor<List<String>> handler = rs -> {
final List names = Lists.newArrayList();
while (rs.next()) {
final String name = rs.getString("name");
final String uri = rs.getString("uri");
final long createdDate = rs.getLong(FIELD_DATE_CREATED);
Map<String, String> values = null;
if (hasDateCreated) {
values = Maps.newHashMap();
values.put(FIELD_DATE_CREATED, createdDate + "");
}
if (Strings.isNullOrEmpty(filterExpression)
|| filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
names.add(name);
}
}
return names;
};
result = getHandlerResults(tableName.getDatabaseName(),
tableName.getTableName(), filterExpression, partitionNames,
SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
} else {
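// No filter expression: collect every partition name.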
final ResultSetExtractor<List<String>> handler = rs -> {
final List names = Lists.newArrayList();
while (rs.next()) {
names.add(rs.getString("name"));
}
return names;
};
result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
null, partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
}
this.fastServiceMetric.recordTimer(
HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
return result;
}
/**
* getPartitionNames.
*
* @param context the request context
* @param uris uris to match
* @param prefixSearch if true, treat each uri as a location prefix
* @return map of uri to matching partition names
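*
* <p>Illustrative usage sketch; the uri is assumed:
* <pre>{@code
* final Map<String, List<QualifiedName>> names = partitionService.getPartitionNames(
*     context, Lists.newArrayList("s3://bucket/warehouse/mydb.db/mytable"), true);
* }</pre>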
*/
@Override
public Map<String, List<QualifiedName>> getPartitionNames(
@Nonnull final ConnectorRequestContext context,
@Nonnull final List<String> uris,
final boolean prefixSearch) {
final long start = registry.clock().wallTime();
final Map<String, List<QualifiedName>> result = Maps.newHashMap();
// Create the sql
final StringBuilder queryBuilder = new StringBuilder(SQL_GET_PARTITION_NAMES_BY_URI);
final List<String> params = Lists.newArrayList();
if (prefixSearch) {
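// Seed with a false predicate so each uri can be appended uniformly as "or location like ?".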
queryBuilder.append(" 1=2");
uris.forEach(uri -> {
queryBuilder.append(" or location like ?");
params.add(uri + "%");
});
} else {
queryBuilder.append(" location in (");
Joiner.on(',').appendTo(queryBuilder, uris.stream().map(uri -> "?").collect(Collectors.toList()));
queryBuilder.append(")");
params.addAll(uris);
}
final ResultSetExtractor<Map<String, List<QualifiedName>>>