
com.amazon.redshift.core.BaseConnection Maven / Gradle / Ivy

/*
 * Copyright (c) 2003, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.core;

import com.amazon.redshift.RedshiftConnection;
import com.amazon.redshift.RedshiftProperty;
import com.amazon.redshift.jdbc.FieldMetadata;
import com.amazon.redshift.jdbc.TimestampUtils;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.LruCache;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.TimerTask;

/**
 * Driver-internal connection interface. Application code should not use this interface.
 */
public interface BaseConnection extends RedshiftConnection, Connection {
  /**
   * Cancel the current query executing on this connection.
   *
   * @throws SQLException if something goes wrong.
   */
  void cancelQuery() throws SQLException;

  /**
   * Execute a SQL query that returns a single resultset. Never causes a new transaction to be
   * started regardless of the autocommit setting.
   *
   * @param s the query to execute
   * @return the (non-null) returned resultset
   * @throws SQLException if something goes wrong.
   */
  ResultSet execSQLQuery(String s) throws SQLException;

  ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
      throws SQLException;

  /**
   * Execute a SQL query that does not return results. Never causes a new transaction to be started
   * regardless of the autocommit setting.
   *
   * @param s the query to execute
   * @throws SQLException if something goes wrong.
   */
  void execSQLUpdate(String s) throws SQLException;
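  // Illustrative sketch (not part of the interface contract): driver-internal callers use
  // execSQLQuery/execSQLUpdate for housekeeping statements precisely because they never start a
  // new transaction regardless of autocommit. A hypothetical caller might look like:
  //
  //   ResultSet rs = connection.execSQLQuery("select current_schema()");
  //   try {
  //     if (rs.next()) {
  //       String schema = rs.getString(1);
  //     }
  //   } finally {
  //     rs.close();
  //   }
  //   connection.execSQLUpdate("set statement_timeout = 60000");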

  /**
   * Get the QueryExecutor implementation for this connection.
   *
   * @return the (non-null) executor
   */
  QueryExecutor getQueryExecutor();

  /**
   * Internal protocol for working with physical and logical replication.
   * Unsupported in Redshift.
   *
   * @return the non-null replication protocol
   */
  ReplicationProtocol getReplicationProtocol();

  /**
   * <p>Construct and return an appropriate object for the given type and value. This only considers
   * the types registered via {@link com.amazon.redshift.RedshiftConnection#addDataType(String, Class)}
   * and {@link com.amazon.redshift.RedshiftConnection#addDataType(String, String)}.</p>
   *
   * <p>If no class is registered as handling the given type, then a generic
   * {@link com.amazon.redshift.util.RedshiftObject} instance is returned.</p>
   *
   * @param type the backend typename
   * @param value the type-specific string representation of the value
   * @param byteValue the type-specific binary representation of the value
   * @return an appropriate object; never null.
   * @throws SQLException if something goes wrong
   */
  Object getObject(String type, String value, byte[] byteValue) throws SQLException;

  Encoding getEncoding() throws SQLException;

  TypeInfo getTypeInfo();

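  // Illustrative note: getObject(type, value, byteValue) only consults types registered through
  // the public RedshiftConnection API, e.g. a hypothetical registration such as
  //   connection.addDataType("mytype", com.example.MyType.class);
  // where com.example.MyType is assumed to extend RedshiftObject. Unregistered backend types fall
  // back to a generic RedshiftObject wrapper.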
  /**
   * <p>Check if we have at least a particular server version.</p>
   *
   * <p>The input version is of the form xxyyzz, matching a Redshift version like xx.yy.zz. So
   * 08.00.02 is 080002.</p>
   *
   * @param ver the server version to check, of the form xxyyzz eg 90401
   * @return true if the server version is at least "ver".
   */
  boolean haveMinimumServerVersion(int ver);

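  // Illustrative note: the integer form packs major/minor/patch digits as xxyyzz, so a check for
  // "at least version 8.0.2" would be haveMinimumServerVersion(80002) and "at least 9.4.1" would
  // be haveMinimumServerVersion(90401).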
  /**
   * <p>Check if we have at least a particular server version.</p>
   *
   * <p>The input version is of the form xxyyzz, matching a Redshift version like xx.yy.zz. So
   * 8.0.2 is 80002.</p>
   *
   * @param ver the server version to check
   * @return true if the server version is at least "ver".
   */
  boolean haveMinimumServerVersion(Version ver);

  /**
   * Encode a string using the database's client_encoding (usually UTF8, but can vary on older
   * server versions). This is used when constructing synthetic resultsets (for example, in
   * metadata methods).
   *
   * @param str the string to encode
   * @return an encoded representation of the string
   * @throws SQLException if something goes wrong.
   */
  byte[] encodeString(String str) throws SQLException;

  /**
   * Escapes a string for use as a string-literal within an SQL command. The method chooses the
   * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
   *
   * @param str a string value
   * @return the escaped representation of the string
   * @throws SQLException if the string contains a {@code \0} character
   */
  String escapeString(String str) throws SQLException;

  /**
   * Escapes only the quotes in a string for use as a catalog name. The method chooses the
   * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
   *
   * @param str a string value
   * @return the escaped representation of the string
   * @throws SQLException if the string contains a {@code \0} character
   */
  String escapeOnlyQuotesString(String str) throws SQLException;

  /**
   * Returns whether the server treats string-literals according to the SQL standard or if it uses
   * traditional Redshift escaping rules. Versions up to 8.1 always treated backslashes as escape
   * characters in string-literals. Since 8.2, this depends on the value of the
   * {@code standard_conforming_strings} server variable.
   *
   * @return true if the server treats string literals according to the SQL standard
   * @see QueryExecutor#getStandardConformingStrings()
   */
  boolean getStandardConformingStrings();
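  // Illustrative sketch (hypothetical caller): the escaped value still needs surrounding quotes
  // when SQL text is built by hand, e.g.
  //   String literal = "'" + connection.escapeString(rawValue) + "'";
  // Which characters are escaped depends on getStandardConformingStrings(): with standard
  // conforming strings only single quotes are doubled; otherwise backslashes are escaped as well.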
  // Ew. Quick hack to give access to the connection-specific utils implementation.
  TimestampUtils getTimestampUtils();

  // Get the per-connection logger.
  RedshiftLogger getLogger();

  // Get the bind-string-as-varchar config flag
  boolean getStringVarcharFlag();

  /**
   * Get the current transaction state of this connection.
   *
   * @return current transaction state of this connection
   */
  TransactionState getTransactionState();

  /**
   * Returns true if the value for the given oid should be sent using binary transfer, false if it
   * should be sent using text transfer.
   *
   * @param oid The oid to check.
   * @return True for binary transfer, false for text transfer.
   */
  boolean binaryTransferSend(int oid);

  /**
   * Return whether column name sanitation is disabled.
   *
   * @return true if the column sanitiser is disabled
   */
  boolean isColumnSanitiserDisabled();

  /**
   * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer
   * for this connection.
   *
   * @param timerTask timer task to schedule
   * @param milliSeconds delay in milliseconds
   */
  void addTimerTask(TimerTask timerTask, long milliSeconds);

  /**
   * Invoke purge() on the underlying shared Timer so that internal resources will be released.
   */
  void purgeTimerTasks();

  /**
   * Return the field metadata cache for this connection.
   *
   * @return metadata cache
   */
  LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache();

  CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
      String... columnNames) throws SQLException;

  /**
   * By default, the connection resets the statement cache when a deallocate all/discard all
   * message is observed. This API allows that behaviour to be disabled for testing purposes.
   *
   * @param flushCacheOnDeallocate true if the statement cache should be reset when a
   *        "deallocate/discard" message is observed
   */
  void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);

  /**
   * Indicates if statements to the backend should be hinted as read only.
   *
   * @return Indication whether hints to the backend (such as when a transaction begins)
   *         should be read only.
   * @see RedshiftProperty#READ_ONLY_MODE
   */
  boolean hintReadOnly();
}
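// Illustrative sketch (not part of this file): the shared Timer behind addTimerTask is typically
// used for driver-internal housekeeping such as cancelling a statement after a query timeout. A
// hypothetical caller holding a BaseConnection might do:
//
//   TimerTask cancelTask = new TimerTask() {
//     @Override
//     public void run() {
//       try {
//         connection.cancelQuery();
//       } catch (SQLException e) {
//         // best-effort cancellation; nothing useful to do here
//       }
//     }
//   };
//   connection.addTimerTask(cancelTask, 30000L); // fire in roughly 30 seconds unless cancelled
//   connection.purgeTimerTasks();                // later: release references to cancelled tasks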



