/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.jdbc;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.util.Collections.emptyMap;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER;
import java.io.EOFException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.lang.ref.WeakReference;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
import java.text.Format;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.htrace.Sampler;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.call.CallRunner;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.CommitException;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.expression.function.FunctionArgumentType;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
import org.apache.phoenix.iterate.DefaultTableResultIteratorFactory;
import org.apache.phoenix.iterate.ParallelIteratorFactory;
import org.apache.phoenix.iterate.TableResultIterator;
import org.apache.phoenix.iterate.TableResultIteratorFactory;
import org.apache.phoenix.jdbc.PhoenixStatement.PhoenixStatementParser;
import org.apache.phoenix.log.LogLevel;
import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.parse.PFunction;
import org.apache.phoenix.parse.PSchema;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.DelegateConnectionQueryServices;
import org.apache.phoenix.query.MetaDataMutated;
import org.apache.phoenix.query.PropertyPolicyProvider;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PMetaData;
import org.apache.phoenix.schema.PMetaData.Pruner;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.PTableRef;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.SchemaNotFoundException;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.schema.types.PArrayDataType;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDate;
import org.apache.phoenix.schema.types.PDecimal;
import org.apache.phoenix.schema.types.PTime;
import org.apache.phoenix.schema.types.PTimestamp;
import org.apache.phoenix.schema.types.PUnsignedDate;
import org.apache.phoenix.schema.types.PUnsignedTime;
import org.apache.phoenix.schema.types.PUnsignedTimestamp;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.transaction.PhoenixTransactionContext;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.JDBCUtil;
import org.apache.phoenix.util.NumberUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SQLCloseable;
import org.apache.phoenix.util.SQLCloseables;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.VarBinaryFormatter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.Lists;
/**
* JDBC Connection implementation of Phoenix. Currently the following are
* supported:
* <ul>
* <li>Statement</li>
* <li>PreparedStatement</li>
* </ul>
* The connection may only be used with the following options:
* <ul>
* <li>ResultSet.TYPE_FORWARD_ONLY</li>
* <li>Connection.TRANSACTION_READ_COMMITTED</li>
* </ul>
*
* @since 0.1
*/
public class PhoenixConnection implements Connection, MetaDataMutated, SQLCloseable {
private final String url;
private String schema;
private final ConnectionQueryServices services;
private final Properties info;
private final Map<PDataType<?>, Format> formatters = new HashMap<>();
private final int mutateBatchSize;
private final long mutateBatchSizeBytes;
private final Long scn;
private final boolean buildingIndex;
private MutationState mutationState;
private List<PhoenixStatement> statements = new ArrayList<>();
private boolean isAutoFlush = false;
private boolean isAutoCommit = false;
private PMetaData metaData;
private final PName tenantId;
private final String datePattern;
private final String timePattern;
private final String timestampPattern;
private int statementExecutionCounter;
private TraceScope traceScope = null;
private volatile boolean isClosed = false;
private Sampler<?> sampler;
private boolean readOnly = false;
private Consistency consistency = Consistency.STRONG;
private Map<String, String> customTracingAnnotations = emptyMap();
private final boolean isRequestLevelMetricsEnabled;
private final boolean isDescVarLengthRowKeyUpgrade;
private ParallelIteratorFactory parallelIteratorFactory;
private final LinkedBlockingQueue<WeakReference<TableResultIterator>> scannerQueue;
private TableResultIteratorFactory tableResultIteratorFactory;
private boolean isRunningUpgrade;
private LogLevel logLevel;
private Double logSamplingRate;
private String sourceOfOperation;
private final Object queueCreationLock = new Object(); // lock for the lazy init path of childConnections structure
private ConcurrentLinkedQueue<PhoenixConnection> childConnections = null;
// For now only the copy constructor paths set this to true, since we don't
// want to change the public interfaces.
private final boolean isInternalConnection;
static {
Tracing.addTraceMetricsSource();
}
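/**
* Returns a new Properties object, backed by the given properties as defaults,
* with the CURRENT_SCN attribute set to the supplied SCN value.
*/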
private static Properties newPropsWithSCN(long scn, Properties props) {
props = new Properties(props);
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
return props;
}
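/**
* Constructs an internal connection from the given connection, reusing its
* query services and cached metadata and carrying over its mutation state,
* with the given row key upgrade and running-upgrade flags.
*/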
public PhoenixConnection(PhoenixConnection connection,
boolean isDescRowKeyOrderUpgrade, boolean isRunningUpgrade)
throws SQLException {
this(connection.getQueryServices(), connection.getURL(), connection
.getClientInfo(), connection.metaData, connection
.getMutationState(), isDescRowKeyOrderUpgrade,
isRunningUpgrade, connection.buildingIndex, true);
this.isAutoCommit = connection.isAutoCommit;
this.isAutoFlush = connection.isAutoFlush;
this.sampler = connection.sampler;
this.statementExecutionCounter = connection.statementExecutionCounter;
}
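/**
* Constructs an internal copy of the given connection, preserving its
* upgrade flags.
*/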
public PhoenixConnection(PhoenixConnection connection) throws SQLException {
this(connection, connection.isDescVarLengthRowKeyUpgrade(), connection
.isRunningUpgrade());
}
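/**
* Constructs an internal copy of the given connection that uses the supplied
* MutationState instead of the source connection's state.
*/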
public PhoenixConnection(PhoenixConnection connection,
MutationState mutationState) throws SQLException {
this(connection.getQueryServices(), connection.getURL(), connection
.getClientInfo(), connection.getMetaDataCache(), mutationState,
connection.isDescVarLengthRowKeyUpgrade(), connection
.isRunningUpgrade(), connection.buildingIndex, true);
}
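/**
* Constructs an internal copy of the given connection with its CURRENT_SCN
* property overridden by the given SCN.
*/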
public PhoenixConnection(PhoenixConnection connection, long scn)
throws SQLException {
this(connection, newPropsWithSCN(scn, connection.getClientInfo()));
}
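/**
* Constructs an internal copy of the given connection using the supplied
* properties in place of the source connection's client info.
*/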
public PhoenixConnection(PhoenixConnection connection, Properties props) throws SQLException {
this(connection.getQueryServices(), connection.getURL(), props, connection.metaData, connection
.getMutationState(), connection.isDescVarLengthRowKeyUpgrade(),
connection.isRunningUpgrade(), connection.buildingIndex, true);
this.isAutoCommit = connection.isAutoCommit;
this.isAutoFlush = connection.isAutoFlush;
this.sampler = connection.sampler;
this.statementExecutionCounter = connection.statementExecutionCounter;
}
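/**
* Creates a new, non-internal connection against the given query services,
* URL and properties.
*/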
public PhoenixConnection(ConnectionQueryServices services, String url,
Properties info, PMetaData metaData) throws SQLException {
this(services, url, info, metaData, null, false, false, false, false);
}
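/**
* Constructs an internal connection from the given connection that targets
* the supplied query services and properties.
*/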
public PhoenixConnection(PhoenixConnection connection,
ConnectionQueryServices services, Properties info)
throws SQLException {
this(services, connection.url, info, connection.metaData, null,
connection.isDescVarLengthRowKeyUpgrade(), connection
.isRunningUpgrade(), connection.buildingIndex, true);
}
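/**
* Core constructor shared by all public constructors. Applies the property
* policy, derives tenant-specific query services, validates the SCN and
* build-index-at parameters, initializes the type formatters and mutation
* state, prunes the cached metadata, registers the connection with the query
* services, and updates the global open-connection metrics.
*/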
private PhoenixConnection(ConnectionQueryServices services, String url,
Properties info, PMetaData metaData, MutationState mutationState,
boolean isDescVarLengthRowKeyUpgrade, boolean isRunningUpgrade,
boolean buildingIndex, boolean isInternalConnection) throws SQLException {
GLOBAL_PHOENIX_CONNECTIONS_ATTEMPTED_COUNTER.increment();
this.url = url;
this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
this.isInternalConnection = isInternalConnection;
// Filter user provided properties based on property policy, if
// provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true
if (Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED,
String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED)))) {
PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
}
// Copy so client cannot change
this.info = info == null ? new Properties() : PropertiesUtil
.deepCopy(info);
final PName tenantId = JDBCUtil.getTenantId(url, info);
if (this.info.isEmpty() && tenantId == null) {
this.services = services;
} else {
// Create child services keyed by tenantId to track resource usage for
// a tenantId for all connections on this JVM.
if (tenantId != null) {
services = services.getChildQueryServices(tenantId
.getBytesPtr());
}
ReadOnlyProps currentProps = services.getProps();
final ReadOnlyProps augmentedProps = currentProps
.addAll(filterKnownNonProperties(this.info));
this.services = augmentedProps == currentProps ? services
: new DelegateConnectionQueryServices(services) {
@Override
public ReadOnlyProps getProps() {
return augmentedProps;
}
};
}
Long scnParam = JDBCUtil.getCurrentSCN(url, this.info);
checkScn(scnParam);
Long buildIndexAtParam = JDBCUtil.getBuildIndexSCN(url, this.info);
checkBuildIndexAt(buildIndexAtParam);
checkScnAndBuildIndexAtEquality(scnParam, buildIndexAtParam);
this.scn = scnParam != null ? scnParam : buildIndexAtParam;
this.buildingIndex = buildingIndex || buildIndexAtParam != null;
this.isAutoFlush = this.services.getProps().getBoolean(
QueryServices.TRANSACTIONS_ENABLED,
QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)
&& this.services.getProps().getBoolean(
QueryServices.AUTO_FLUSH_ATTRIB,
QueryServicesOptions.DEFAULT_AUTO_FLUSH);
this.isAutoCommit = JDBCUtil.getAutoCommit(
url,
this.info,
this.services.getProps().getBoolean(
QueryServices.AUTO_COMMIT_ATTRIB,
QueryServicesOptions.DEFAULT_AUTO_COMMIT));
this.consistency = JDBCUtil.getConsistencyLevel(
url,
this.info,
this.services.getProps().get(QueryServices.CONSISTENCY_ATTRIB,
QueryServicesOptions.DEFAULT_CONSISTENCY_LEVEL));
// Currently we are not resolving the schema set through this property, so
// if the schema doesn't exist the connection will not fail, but queries
// may fail.
this.schema = JDBCUtil.getSchema(
url,
this.info,
this.services.getProps().get(QueryServices.SCHEMA_ATTRIB,
QueryServicesOptions.DEFAULT_SCHEMA));
this.tenantId = tenantId;
this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info,
this.services.getProps());
this.mutateBatchSizeBytes = JDBCUtil.getMutateBatchSizeBytes(url,
this.info, this.services.getProps());
datePattern = this.services.getProps().get(
QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
timePattern = this.services.getProps().get(
QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
timestampPattern = this.services.getProps().get(
QueryServices.TIMESTAMP_FORMAT_ATTRIB,
DateUtil.DEFAULT_TIMESTAMP_FORMAT);
String numberPattern = this.services.getProps().get(
QueryServices.NUMBER_FORMAT_ATTRIB,
NumberUtil.DEFAULT_NUMBER_FORMAT);
int maxSize = this.services.getProps().getInt(
QueryServices.MAX_MUTATION_SIZE_ATTRIB,
QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
long maxSizeBytes = this.services.getProps().getLong(
QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
String timeZoneID = this.services.getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
DateUtil.DEFAULT_TIME_ZONE_ID);
Format dateFormat = DateUtil.getDateFormatter(datePattern, timeZoneID);
Format timeFormat = DateUtil.getDateFormatter(timePattern, timeZoneID);
Format timestampFormat = DateUtil.getDateFormatter(timestampPattern, timeZoneID);
formatters.put(PDate.INSTANCE, dateFormat);
formatters.put(PTime.INSTANCE, timeFormat);
formatters.put(PTimestamp.INSTANCE, timestampFormat);
formatters.put(PUnsignedDate.INSTANCE, dateFormat);
formatters.put(PUnsignedTime.INSTANCE, timeFormat);
formatters.put(PUnsignedTimestamp.INSTANCE, timestampFormat);
formatters.put(PDecimal.INSTANCE,
FunctionArgumentType.NUMERIC.getFormatter(numberPattern));
formatters.put(PVarbinary.INSTANCE, VarBinaryFormatter.INSTANCE);
// We do not limit the metadata on a connection to less than the global
// one, as there's not much that will be cached here.
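// The pruner below drops cached non-SYSTEM tables (and functions) whose
// timestamp is at or beyond the connection's SCN, as well as entries owned
// by a different tenant, so the connection only sees metadata visible to it.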
Pruner pruner = new Pruner() {
@Override
public boolean prune(PTable table) {
long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP
: scn;
return (table.getType() != PTableType.SYSTEM && (table
.getTimeStamp() >= maxTimestamp || (table.getTenantId() != null && !Objects
.equal(tenantId, table.getTenantId()))));
}
@Override
public boolean prune(PFunction function) {
long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP
: scn;
return (function.getTimeStamp() >= maxTimestamp || (function
.getTenantId() != null && !Objects.equal(tenantId,
function.getTenantId())));
}
};
this.logLevel = LogLevel.valueOf(this.services.getProps().get(QueryServices.LOG_LEVEL,
QueryServicesOptions.DEFAULT_LOGGING_LEVEL));
this.isRequestLevelMetricsEnabled = JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info,
this.services.getProps());
this.mutationState = mutationState == null ? newMutationState(maxSize,
maxSizeBytes) : new MutationState(mutationState, this);
this.metaData = metaData;
this.metaData.pruneTables(pruner);
this.metaData.pruneFunctions(pruner);
this.services.addConnection(this);
// Set up tracing, if it's enabled
this.sampler = Tracing.getConfiguredSampler(this);
this.customTracingAnnotations = getImmutableCustomTracingAnnotations();
this.scannerQueue = new LinkedBlockingQueue<>();
this.tableResultIteratorFactory = new DefaultTableResultIteratorFactory();
this.isRunningUpgrade = isRunningUpgrade;
this.logSamplingRate = Double.parseDouble(this.services.getProps().get(QueryServices.LOG_SAMPLE_RATE,
QueryServicesOptions.DEFAULT_LOG_SAMPLE_RATE));
if (isInternalConnection) {
GLOBAL_OPEN_INTERNAL_PHOENIX_CONNECTIONS.increment();
} else {
GLOBAL_OPEN_PHOENIX_CONNECTIONS.increment();
}
this.sourceOfOperation =
this.services.getProps().get(QueryServices.SOURCE_OPERATION_ATTRIB, null);
}
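// Rejects negative CURRENT_SCN values.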
private static void checkScn(Long scnParam) throws SQLException {
if (scnParam != null && scnParam < 0) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_SCN)
.build().buildException();
}
}
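// Rejects negative BUILD_INDEX_AT (replay) values.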
private static void checkBuildIndexAt(Long replayAtParam) throws SQLException {
if (replayAtParam != null && replayAtParam < 0) {
throw new SQLExceptionInfo.Builder(
SQLExceptionCode.INVALID_REPLAY_AT).build()
.buildException();
}
}
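// When both CURRENT_SCN and BUILD_INDEX_AT are supplied, they must be equal.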
private static void checkScnAndBuildIndexAtEquality(Long scnParam, Long replayAt)
throws SQLException {
if (scnParam != null && replayAt != null && !scnParam.equals(replayAt)) {
throw new SQLExceptionInfo.Builder(
SQLExceptionCode.UNEQUAL_SCN_AND_BUILD_INDEX_AT).build()
.buildException();
}
}
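/**
* Returns the given properties with Phoenix connection-level attributes
* removed, so that only configuration properties are propagated to the
* query services. The original Properties object is returned unchanged if
* nothing needs to be filtered out; otherwise a pruned copy is returned.
*/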
private static Properties filterKnownNonProperties(Properties info) {
Properties prunedProperties = info;
for (String property : PhoenixRuntime.CONNECTION_PROPERTIES) {
if (info.containsKey(property)) {
if (prunedProperties == info) {
prunedProperties = PropertiesUtil.deepCopy(info);
}
prunedProperties.remove(property);
}
}
return prunedProperties;
}
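// Builds the immutable map of tracing annotations from the JDBC URL and
// connection properties, adding the SCN and tenant id when present.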
private ImmutableMap<String, String> getImmutableCustomTracingAnnotations() {
Builder<String, String> result = ImmutableMap.builder();
result.putAll(JDBCUtil.getAnnotations(url, info));
if (getSCN() != null) {
result.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, getSCN().toString());
}
if (getTenantId() != null) {
result.put(PhoenixRuntime.TENANT_ID_ATTRIB, getTenantId()
.getString());
}
return result.build();
}
public boolean isInternalConnection() {
return isInternalConnection;
}
/**
* This method, and *only* this method, is thread-safe.
* @param connection the child connection to register
*/
public void addChildConnection(PhoenixConnection connection) {
// Double-checked locking so the queue is created lazily without
// synchronizing on the common path
if (childConnections == null) {
synchronized (queueCreationLock) {
if (childConnections == null) {
childConnections = new ConcurrentLinkedQueue<>();
}
}
}
childConnections.add(connection);
}
/**
* Removes a child connection from the childConnections queue.
*
* @param connection the child connection to remove
*/
public void removeChildConnection(PhoenixConnection connection) {
if (childConnections != null) {
childConnections.remove(connection);
}
}
/**
* Returns the number of child connections currently in the childConnections
* queue.
*
* @return the child connection count
*/
@VisibleForTesting
public int getChildConnectionsCount() {
if (childConnections != null) {
return childConnections.size();
}
return 0;
}
public Sampler<?> getSampler() {
return this.sampler;
}
public void setSampler(Sampler<?> sampler) throws SQLException {
this.sampler = sampler;
}
public Map<String, String> getCustomTracingAnnotations() {
return customTracingAnnotations;
}
public int executeStatements(Reader reader, List<Object> binds,
PrintStream out) throws IOException, SQLException {