// All Downloads are FREE. Search and download functionalities are using the official Maven repository.
//
// com.amazon.redshift.jdbc.RedshiftPreparedStatement Maven / Gradle / Ivy
//
// There is a newer version: 2.1.0.30
// Show newest version
/*
 * Copyright (c) 2004, PostgreSQL Global Development Group
 * See the LICENSE file in the project root for more information.
 */

package com.amazon.redshift.jdbc;

import com.amazon.redshift.Driver;
import com.amazon.redshift.core.BaseConnection;
import com.amazon.redshift.core.CachedQuery;
import com.amazon.redshift.core.Oid;
import com.amazon.redshift.core.ParameterList;
import com.amazon.redshift.core.Query;
import com.amazon.redshift.core.QueryExecutor;
import com.amazon.redshift.core.TypeInfo;
import com.amazon.redshift.core.v3.BatchedQuery;
import com.amazon.redshift.largeobject.LargeObject;
import com.amazon.redshift.largeobject.LargeObjectManager;
import com.amazon.redshift.logger.RedshiftLogger;
import com.amazon.redshift.util.ByteConverter;
import com.amazon.redshift.util.ByteStreamWriter;
import com.amazon.redshift.util.GT;
import com.amazon.redshift.util.HStoreConverter;
import com.amazon.redshift.util.RedshiftBinaryObject;
import com.amazon.redshift.util.RedshiftTime;
import com.amazon.redshift.util.RedshiftTimestamp;
import com.amazon.redshift.util.RedshiftIntervalYearToMonth;
import com.amazon.redshift.util.RedshiftIntervalDayToSecond;
import com.amazon.redshift.util.RedshiftObject;
import com.amazon.redshift.util.RedshiftException;
import com.amazon.redshift.util.RedshiftState;
import com.amazon.redshift.util.ReaderInputStream;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.charset.Charset;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
//JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
//JCP! endif
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Map;
import java.util.TimeZone;
// import java.util.UUID;

public class RedshiftPreparedStatement extends RedshiftStatementImpl implements PreparedStatement {
  protected final CachedQuery preparedQuery; // Query fragments for prepared statement.
  protected final ParameterList preparedParameters; // Parameter values for prepared statement.

  // Cleared after every execution (see executeWithFlags); presumably caches the
  // session time zone for temporal binds via getDefaultCalendar() — confirm there.
  private TimeZone defaultTimeZone;
  // When true, isOneShotQuery() always reports false so the driver uses a
  // server-side statement with a generated name.
  protected boolean enableGeneratedName;

  /**
   * Creates a prepared statement for the given SQL text. The SQL is parsed and
   * cached through the connection's query cache (borrowQuery), then handed to
   * the CachedQuery-based constructor.
   */
  RedshiftPreparedStatement(RedshiftConnectionImpl connection, String sql, int rsType, int rsConcurrency,
      int rsHoldability) throws SQLException {
    this(connection, connection.borrowQuery(sql), rsType, rsConcurrency, rsHoldability);
  }

  /**
   * Creates a prepared statement over an already parsed/cached query and
   * allocates its parameter list.
   */
  RedshiftPreparedStatement(RedshiftConnectionImpl connection, CachedQuery query, int rsType,
      int rsConcurrency, int rsHoldability) throws SQLException {
    super(connection, rsType, rsConcurrency, rsHoldability);

    this.preparedQuery = query;
    this.preparedParameters = this.preparedQuery.query.createParameterList();
    // TODO: this.wantsGeneratedKeysAlways = true;

    setPoolable(true); // As per JDBC spec: prepared and callable statements are poolable by default.
    enableGeneratedName = connection.getGeneratedName();
  }

  /**
   * Not supported on a PreparedStatement: the SQL text was fixed at prepare
   * time, so the String-taking query methods must not be used.
   *
   * @throws SQLException always, with state WRONG_OBJECT_TYPE
   */
  @Override
  public ResultSet executeQuery(String sql) throws SQLException {
    throw new RedshiftException(
        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
        RedshiftState.WRONG_OBJECT_TYPE);
  }

  /*
   * A Prepared SQL query is executed and its ResultSet is returned
   *
   * @return a ResultSet that contains the data produced by the * query - never null
   *
   * @exception SQLException if a database access error occurs
   */
  /**
   * Executes the prepared query and returns its single ResultSet.
   *
   * @return a ResultSet containing the data produced by the query; never null
   * @throws SQLException if a database access error occurs or the query
   *         returned no results
   */
  @Override
  public ResultSet executeQuery() throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true);
    }

    final boolean hasResultSet = executeWithFlags(0);
    if (!hasResultSet) {
      throw new RedshiftException(GT.tr("No results were returned by the query."), RedshiftState.NO_DATA);
    }

    final ResultSet rs = getSingleResultSet();

    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false, rs);
    }

    return rs;
  }

  /**
   * Not supported on a PreparedStatement: the SQL text was fixed at prepare
   * time, so the String-taking query methods must not be used.
   *
   * @throws SQLException always, with state WRONG_OBJECT_TYPE
   */
  @Override
  public int executeUpdate(String sql) throws SQLException {
    throw new RedshiftException(
        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
        RedshiftState.WRONG_OBJECT_TYPE);
  }

  /**
   * Executes the prepared DML statement and returns its update count.
   *
   * @return the number of rows affected
   * @throws SQLException if a database access error occurs or the statement
   *         unexpectedly produced a ResultSet
   */
  @Override
  public int executeUpdate() throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true);
    }

    if (this.autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS) {
      wantsGeneratedKeysOnce = true;
    }

    executeWithFlags(QueryExecutor.QUERY_NO_RESULTS);
    // Fail fast if the statement produced a result set.
    checkNoResultUpdate();

    final int updateCount = getUpdateCount();

    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false, updateCount);
    }

    return updateCount;
  }

  //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
  /**
   * Executes the prepared DML statement and returns its update count as a long
   * (JDBC 4.2 large-update variant of executeUpdate()).
   *
   * @return the number of rows affected
   * @throws SQLException if a database access error occurs or the statement
   *         unexpectedly produced a ResultSet
   */
  @Override
  public long executeLargeUpdate() throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true);
  	
  	if(this.autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS)
	    wantsGeneratedKeysOnce = true;
    
    executeWithFlags(QueryExecutor.QUERY_NO_RESULTS);
    checkNoResultUpdate();
    long rc = getLargeUpdateCount();
    
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(false, rc);
    
    return rc;
  }
  //JCP! endif

  /**
   * Not supported on a PreparedStatement: the SQL text was fixed at prepare
   * time, so the String-taking query methods must not be used.
   *
   * @throws SQLException always, with state WRONG_OBJECT_TYPE
   */
  @Override
  public boolean execute(String sql) throws SQLException {
    throw new RedshiftException(
        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
        RedshiftState.WRONG_OBJECT_TYPE);
  }

  /**
   * Executes the prepared statement.
   *
   * @return true if the first result is a ResultSet
   * @throws SQLException if a database access error occurs
   */
  @Override
  public boolean execute() throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true);
    }

    final boolean hasResultSet = executeWithFlags(0);

    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false, hasResultSet);
    }

    return hasResultSet;
  }

  /**
   * Executes the prepared query with the given QueryExecutor flags.
   *
   * @param flags bitmask of QueryExecutor.QUERY_* flags
   * @return true if the first result is a ResultSet
   * @throws SQLException if this statement is closed or execution fails
   */
  public boolean executeWithFlags(int flags) throws SQLException {
    try {
      checkClosed();

      // In simple protocol mode the query must be sent as a simple statement.
      if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
        flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
      }

      execute(preparedQuery, preparedParameters, flags);

      // Re-check under the lock: close() may race with execution completion.
      synchronized (this) {
        checkClosed();
        return (result != null && result.getResultSet() != null);
      }
    } finally {
      // Reset the per-execution time zone cache regardless of outcome.
      defaultTimeZone = null;
    }
  }

  /**
   * Reports whether the given (or, if null, this statement's) query should be
   * executed as a one-shot query rather than a named server-side statement.
   */
  protected boolean isOneShotQuery(CachedQuery cachedQuery) {
    if (cachedQuery == null) {
      cachedQuery = preparedQuery;
    }
    boolean rc = super.isOneShotQuery(cachedQuery);
    
    // A prepared query can be reported as not-one-shot based on the
    // enableGeneratedName setting, so the driver can use a server-side
    // statement with a generated statement name (and a portal the same way).
    return enableGeneratedName ? false : rc; 
  }

  /**
   * Releases the cached prepared query back to the connection and performs the
   * base-class close work.
   *
   * <p>Fix: the original invoked {@code super.closeImpl()} only when
   * {@code preparedQuery} was non-null, and skipped it entirely when
   * {@code releaseQuery} threw — base-class cleanup now always runs.
   *
   * @throws SQLException if releasing resources fails
   */
  @Override
  public void closeImpl() throws SQLException {
    try {
      if (preparedQuery != null) {
        ((RedshiftConnectionImpl) connection).releaseQuery(preparedQuery);
      }
    } finally {
      // Always run base-class cleanup, even if releasing the query failed.
      super.closeImpl();
    }
  }

  /**
   * Binds SQL NULL for the given parameter, mapping the JDBC {@link Types}
   * code to the Redshift wire OID used for the typed NULL.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param sqlType a java.sql.Types code
   * @throws SQLException if the index is out of range or sqlType is unknown
   */
  public void setNull(int parameterIndex, int sqlType) throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true, parameterIndex, sqlType);
  	
    checkClosed();

    if (parameterIndex < 1 || parameterIndex > preparedParameters.getParameterCount()) {
      throw new RedshiftException(
        GT.tr("The column index is out of range: {0}, number of columns: {1}.",
          parameterIndex, preparedParameters.getParameterCount()),
        RedshiftState.INVALID_PARAMETER_VALUE);
    }

    int oid;
    switch (sqlType) {
      case Types.SQLXML:
        oid = Oid.XML;
        break;
      case Types.INTEGER:
        oid = Oid.INT4;
        break;
      case Types.TINYINT:
      case Types.SMALLINT:
        oid = Oid.INT2;
        break;
      case Types.BIGINT:
        oid = Oid.INT8;
        break;
      case Types.REAL:
        oid = Oid.FLOAT4;
        break;
      case Types.DOUBLE:
      case Types.FLOAT:
        oid = Oid.FLOAT8;
        break;
      case Types.DECIMAL:
      case Types.NUMERIC:
        oid = Oid.NUMERIC;
        break;
      case Types.CHAR:
        oid = Oid.BPCHAR;
        break;
      case Types.VARCHAR:
      case Types.LONGVARCHAR:
        // Honors the stringtype=varchar connection setting, like setString().
        oid = connection.getStringVarcharFlag() ? Oid.VARCHAR : Oid.UNSPECIFIED;
        break;
      case Types.DATE:
        oid = Oid.DATE;
        break;
      case Types.TIME:
      //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
      case Types.TIME_WITH_TIMEZONE:
      case Types.TIMESTAMP_WITH_TIMEZONE:
      //JCP! endif
      case Types.TIMESTAMP:
        oid = Oid.UNSPECIFIED;
        break;
      case Types.BOOLEAN:
      case Types.BIT:
        oid = Oid.BOOL;
        break;
      case Types.BINARY:
      case Types.VARBINARY:
      case Types.BLOB:
        oid = Oid.BYTEA;
        break;
      case Types.LONGVARBINARY:
        oid = Oid.VARBYTE; // For NULL it's ambiguity which one to use as both (Oid.VARBYTE & Oid.GEOGRAPHY) map to same SQL type. 
        break;
      case Types.CLOB: {
      	// In case of NULL, CLOB can be seen as VARCHAR
      	// This is useful in application like Spark dataframe which generates
      	// code to setNull as CLOB without seeing data source support it or not 
      	// as dataframe read must have happen using a CLOB supported database like MySQL or SQL Server.
        oid = Oid.VARCHAR; 
        break;
      }
      case Types.ARRAY:
      case Types.DISTINCT:
      case Types.STRUCT:
      case Types.NULL:
      case Types.OTHER:
        oid = Oid.UNSPECIFIED;
        break;
      default:
        // Bad Types value.
        throw new RedshiftException(GT.tr("Unknown Types value."), RedshiftState.INVALID_PARAMETER_TYPE);
    }
    preparedParameters.setNull(parameterIndex, oid);
  }

  /**
   * Binds a boolean parameter using the SQL-compliant TRUE/FALSE keywords.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setBoolean(int parameterIndex, boolean x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    final String literal = x ? "TRUE" : "FALSE";
    bindLiteral(parameterIndex, literal, Oid.BOOL);
  }

  /**
   * Binds a byte parameter by widening it to short (there is no 1-byte
   * integer wire type).
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setByte(int parameterIndex, byte x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    setShort(parameterIndex, (short) x);
  }

  /**
   * Binds a short parameter, using the 2-byte binary wire format when binary
   * transfer is enabled for INT2, otherwise a text literal.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setShort(int parameterIndex, short x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (connection.binaryTransferSend(Oid.INT2)) {
      final byte[] encoded = new byte[2];
      ByteConverter.int2(encoded, 0, x);
      bindBytes(parameterIndex, encoded, Oid.INT2);
    } else {
      bindLiteral(parameterIndex, Integer.toString(x), Oid.INT2);
    }
  }

  /**
   * Binds an int parameter, using the 4-byte binary wire format when binary
   * transfer is enabled for INT4, otherwise a text literal.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setInt(int parameterIndex, int x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (connection.binaryTransferSend(Oid.INT4)) {
      final byte[] encoded = new byte[4];
      ByteConverter.int4(encoded, 0, x);
      bindBytes(parameterIndex, encoded, Oid.INT4);
    } else {
      bindLiteral(parameterIndex, Integer.toString(x), Oid.INT4);
    }
  }

  /**
   * Binds a long parameter, using the 8-byte binary wire format when binary
   * transfer is enabled for INT8, otherwise a text literal.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setLong(int parameterIndex, long x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (connection.binaryTransferSend(Oid.INT8)) {
      final byte[] encoded = new byte[8];
      ByteConverter.int8(encoded, 0, x);
      bindBytes(parameterIndex, encoded, Oid.INT8);
    } else {
      bindLiteral(parameterIndex, Long.toString(x), Oid.INT8);
    }
  }

  /**
   * Binds a float parameter, using the 4-byte binary wire format when binary
   * transfer is enabled for FLOAT4, otherwise a text literal.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setFloat(int parameterIndex, float x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (connection.binaryTransferSend(Oid.FLOAT4)) {
      final byte[] encoded = new byte[4];
      ByteConverter.float4(encoded, 0, x);
      bindBytes(parameterIndex, encoded, Oid.FLOAT4);
    } else {
      // NOTE(review): the text path declares the parameter as FLOAT8, not
      // FLOAT4; this asymmetry is preserved as-is — confirm it is intended.
      bindLiteral(parameterIndex, Float.toString(x), Oid.FLOAT8);
    }
  }

  /**
   * Binds a double parameter, using the 8-byte binary wire format when binary
   * transfer is enabled for FLOAT8, otherwise a text literal.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind
   * @throws SQLException if this statement is closed
   */
  public void setDouble(int parameterIndex, double x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (connection.binaryTransferSend(Oid.FLOAT8)) {
      final byte[] encoded = new byte[8];
      ByteConverter.float8(encoded, 0, x);
      bindBytes(parameterIndex, encoded, Oid.FLOAT8);
    } else {
      bindLiteral(parameterIndex, Double.toString(x), Oid.FLOAT8);
    }
  }

  /**
   * Binds a BigDecimal parameter through the shared Number path as NUMERIC.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    setNumber(parameterIndex, x);
  }

  /**
   * Binds a String parameter using the default string OID (VARCHAR when the
   * stringtype=varchar setting is active, UNSPECIFIED otherwise).
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setString(int parameterIndex, String x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    final int oid = getStringType();
    setString(parameterIndex, x, oid);
  }

  /**
   * Returns the OID used for plain string binds: VARCHAR when the
   * stringtype=varchar connection setting is active, otherwise UNSPECIFIED.
   */
  private int getStringType() {
    if (connection.getStringVarcharFlag()) {
      return Oid.VARCHAR;
    }
    return Oid.UNSPECIFIED;
  }

  /**
   * Binds a String with an explicit OID; a null value binds a typed SQL NULL.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind, or null for SQL NULL
   * @param oid the wire OID to declare for the parameter
   * @throws SQLException if this statement is closed
   */
  protected void setString(int parameterIndex, String x, int oid) throws SQLException {
    checkClosed();
    if (x != null) {
      bindString(parameterIndex, x, oid);
    } else {
      preparedParameters.setNull(parameterIndex, oid);
    }
  }

  /**
   * Binds a byte array as a bytea parameter. A defensive copy is taken so a
   * later caller-side mutation of the array does not affect the bind.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the bytes to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setBytes(int parameterIndex, byte[] x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (null == x) {
      setNull(parameterIndex, Types.VARBINARY);
      return;
    }

    // clone() is the idiomatic, equally efficient way to copy the whole array.
    byte[] copy = x.clone();
    preparedParameters.setBytea(parameterIndex, copy, 0, x.length);
  }

  // Binds a bytea parameter from a ByteStreamWriter source; the parameter
  // list consumes the writer directly (no byte[] copy is made here).
  private void setByteStreamWriter(int parameterIndex, ByteStreamWriter x) throws SQLException {
    preparedParameters.setBytea(parameterIndex, x);
  }

  /**
   * Binds a byte array as a Redshift VARBYTE parameter. A defensive copy is
   * taken so a later caller-side mutation of the array does not affect the bind.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the bytes to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setVarbyte(int parameterIndex, byte[] x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (null == x) {
      setNull(parameterIndex, Types.VARBINARY);
      return;
    }

    // clone() is the idiomatic, equally efficient way to copy the whole array.
    byte[] copy = x.clone();
    preparedParameters.setVarbyte(parameterIndex, copy, 0, x.length);
  }

  /**
   * Binds a byte array as a Redshift GEOGRAPHY parameter. A defensive copy is
   * taken so a later caller-side mutation of the array does not affect the bind.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the bytes to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setGeography(int parameterIndex, byte[] x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    checkClosed();

    if (null == x) {
      setNull(parameterIndex, Types.VARBINARY);
      return;
    }

    // clone() is the idiomatic, equally efficient way to copy the whole array.
    byte[] copy = x.clone();
    preparedParameters.setGeography(parameterIndex, copy, 0, x.length);
  }
  
  /**
   * Binds a Date parameter; delegates to the Calendar-aware overload with no
   * explicit calendar (session default time zone applies).
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setDate(int parameterIndex, java.sql.Date x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    setDate(parameterIndex, x, null);
  }

  /**
   * Binds a Time parameter; delegates to the Calendar-aware overload with no
   * explicit calendar (session default time zone applies).
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setTime(int parameterIndex, Time x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    setTime(parameterIndex, x, null);
  }

  /**
   * Binds a Timestamp parameter; delegates to the Calendar-aware overload with
   * no explicit calendar (session default time zone applies).
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the value to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    setTimestamp(parameterIndex, x, null);
  }

  /**
   * Binds an INTERVAL YEAR TO MONTH parameter. Uses the 4-byte binary wire
   * format (total months) when binary transfer is enabled; otherwise binds
   * the interval's text form.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the interval to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setIntervalYearToMonth(int parameterIndex, RedshiftIntervalYearToMonth x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    if (x == null) {
      setNull(parameterIndex, Types.OTHER);
    } else if (connection.binaryTransferSend(Oid.INTERVALY2M)) {
      // Wire format: total months as a 4-byte integer.
      final byte[] encoded = new byte[4];
      ByteConverter.int4(encoded, 0, (int) x.totalMonths());
      preparedParameters.setBinaryParameter(parameterIndex, encoded, Oid.INTERVALY2M);
    } else {
      bindString(parameterIndex, x.getValue(), Oid.UNSPECIFIED);
    }
  }

  /**
   * Binds an INTERVAL DAY TO SECOND parameter. Uses the 8-byte binary wire
   * format (total microseconds) when binary transfer is enabled; otherwise
   * binds the interval's text form.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the interval to bind, or null for SQL NULL
   * @throws SQLException if this statement is closed
   */
  public void setIntervalDayToSecond(int parameterIndex, RedshiftIntervalDayToSecond x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x);
    }

    if (x == null) {
      setNull(parameterIndex, Types.OTHER);
    } else if (connection.binaryTransferSend(Oid.INTERVALD2S)) {
      // Wire format: total microseconds as an 8-byte integer.
      final byte[] encoded = new byte[8];
      ByteConverter.int8(encoded, 0, (long) x.totalMicroseconds());
      preparedParameters.setBinaryParameter(parameterIndex, encoded, Oid.INTERVALD2S);
    } else {
      bindString(parameterIndex, x.getValue(), Oid.UNSPECIFIED);
    }
  }

  /**
   * Reads up to {@code length} characters from the stream in the given
   * encoding and binds the result as a VARCHAR string. Redshift has no
   * distinct long-varchar type, so large character data is bound via
   * setString() rather than streamed to the server.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the input stream, or null for SQL NULL
   * @param length maximum number of characters to read; must be non-negative
   * @param encoding the character encoding of the stream bytes
   * @throws SQLException if the length is negative, the encoding is
   *         unsupported by the JVM, or the stream fails while reading
   */
  private void setCharacterStreamPost71(int parameterIndex, InputStream x, int length,
      String encoding) throws SQLException {

    if (x == null) {
      setNull(parameterIndex, Types.VARCHAR);
      return;
    }
    if (length < 0) {
      throw new RedshiftException(GT.tr("Invalid stream length {0}.", length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }

    try {
      final InputStreamReader reader = new InputStreamReader(x, encoding);
      final char[] buffer = new char[length];
      int total = 0;
      int n;
      // Keep reading until 'length' chars are collected or the stream ends.
      while (total < length && (n = reader.read(buffer, total, length - total)) != -1) {
        total += n;
      }
      setString(parameterIndex, new String(buffer, 0, total), Oid.VARCHAR);
    } catch (UnsupportedEncodingException uee) {
      throw new RedshiftException(GT.tr("The JVM claims not to support the {0} encoding.", encoding),
          RedshiftState.UNEXPECTED_ERROR, uee);
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Provided InputStream failed."), RedshiftState.UNEXPECTED_ERROR,
          ioe);
    }
  }

  /**
   * Binds an ASCII-encoded character stream; the stream is fully read and
   * bound as a string value.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the ASCII input stream, or null for SQL NULL
   * @param length the number of characters to read
   * @throws SQLException if this statement is closed or the stream fails
   */
  public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
    // Log entry for parity with the other setters (e.g. setUnicodeStream).
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x, length);
    }

    checkClosed();
    setCharacterStreamPost71(parameterIndex, x, length, "ASCII");
  }

  /**
   * Binds a UTF-8 encoded character stream; the stream is fully read and
   * bound as a string value.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the UTF-8 input stream, or null for SQL NULL
   * @param length the number of characters to read
   * @throws SQLException if this statement is closed or the stream fails
   */
  public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x, length);
    }

    checkClosed();
    setCharacterStreamPost71(parameterIndex, x, length, "UTF-8");
  }

  /**
   * Binds a binary stream as a bytea parameter of a known length. Redshift has
   * no separate long-binary type; bytea handles large values.
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param x the binary input stream, or null for SQL NULL
   * @param length the number of bytes in the stream; must be non-negative
   * @throws SQLException if this statement is closed or the length is negative
   */
  public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
    // Log entry for parity with the other setters (e.g. setUnicodeStream).
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x, length);
    }

    checkClosed();

    if (x == null) {
      setNull(parameterIndex, Types.VARBINARY);
      return;
    }

    if (length < 0) {
      throw new RedshiftException(GT.tr("Invalid stream length {0}.", length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }

    // The stream is consumed by the parameter list at execute time.
    preparedParameters.setBytea(parameterIndex, x, length);
  }

  /**
   * Clears all currently bound parameter values.
   *
   * @throws SQLException declared for the JDBC interface; clearing itself
   *         does not access the database
   */
  public void clearParameters() throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true);
    }

    preparedParameters.clear();

    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false);
    }
  }

  // Helper method for setting parameters to RedshiftObject subclasses.
  private void setRedshiftObject(int parameterIndex, RedshiftObject x) throws SQLException {
    String typename = x.getType();
    int oid = connection.getTypeInfo().getRSType(typename);
    if (oid == Oid.UNSPECIFIED) {
      throw new RedshiftException(GT.tr("Unknown type {0}.", typename),
          RedshiftState.INVALID_PARAMETER_TYPE);
    }

    if ((x instanceof RedshiftBinaryObject) && connection.binaryTransferSend(oid)) {
      RedshiftBinaryObject binObj = (RedshiftBinaryObject) x;
      byte[] data = new byte[binObj.lengthInBytes()];
      binObj.toBytes(data, 0);
      bindBytes(parameterIndex, data, oid);
    } else {
      setString(parameterIndex, x.getValue(), oid);
    }
  }

  /**
   * Binds a java.util.Map as an hstore parameter, using the binary wire
   * format when binary transfer is enabled for the hstore type.
   *
   * @throws SQLException if the server has no hstore type registered
   */
  private void setMap(int parameterIndex, Map x) throws SQLException {
    final int oid = connection.getTypeInfo().getRSType("hstore");
    if (oid == Oid.UNSPECIFIED) {
      throw new RedshiftException(GT.tr("No hstore extension installed."),
          RedshiftState.INVALID_PARAMETER_TYPE);
    }

    if (!connection.binaryTransferSend(oid)) {
      setString(parameterIndex, HStoreConverter.toString(x), oid);
    } else {
      final byte[] data = HStoreConverter.toBytes(x, connection.getEncoding());
      bindBytes(parameterIndex, data, oid);
    }
  }

  /**
   * Binds an arbitrary Number as a NUMERIC parameter; null maps to a DECIMAL
   * SQL NULL. BigInteger values are routed through setString().
   *
   * @throws SQLException if this statement is closed
   */
  private void setNumber(int parameterIndex, Number x) throws SQLException {
    checkClosed();
    if (x == null) {
      setNull(parameterIndex, Types.DECIMAL);
      return;
    }
    if (x instanceof BigInteger) {
      setString(parameterIndex, x.toString());
    } else {
      bindLiteral(parameterIndex, x.toString(), Oid.NUMERIC);
    }
  }

  /**
   * Binds an arbitrary object as the given JDBC target type, converting as
   * needed. A null value is bound as a typed SQL NULL via setNull().
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param in the value to bind, or null for SQL NULL
   * @param targetSqlType a java.sql.Types code controlling the conversion
   * @param scale used only for DECIMAL/NUMERIC targets (BigDecimal scale)
   * @throws SQLException if the value cannot be converted to the target type
   */
  @Override
  public void setObject(int parameterIndex, Object in, int targetSqlType, int scale)
      throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true, parameterIndex, in, targetSqlType, scale);
  	
    checkClosed();

    if (in == null) {
      setNull(parameterIndex, targetSqlType);
      return;
    }

/*    if (targetSqlType == Types.OTHER && in instanceof UUID
        && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
      setUuid(parameterIndex, (UUID) in);
      return;
    } */

    switch (targetSqlType) {
      case Types.SQLXML:
        if (in instanceof SQLXML) {
          setSQLXML(parameterIndex, (SQLXML) in);
        } else {
          setSQLXML(parameterIndex, new RedshiftSQLXML(connection, in.toString()));
        }
        break;
      case Types.INTEGER:
        setInt(parameterIndex, castToInt(in));
        break;
      case Types.TINYINT:
      case Types.SMALLINT:
        setShort(parameterIndex, castToShort(in));
        break;
      case Types.BIGINT:
        setLong(parameterIndex, castToLong(in));
        break;
      case Types.REAL:
        setFloat(parameterIndex, castToFloat(in));
        break;
      case Types.DOUBLE:
      case Types.FLOAT:
        setDouble(parameterIndex, castToDouble(in));
        break;
      case Types.DECIMAL:
      case Types.NUMERIC:
        setBigDecimal(parameterIndex, castToBigDecimal(in, scale));
        break;
      case Types.CHAR:
        setString(parameterIndex, castToString(in), Oid.BPCHAR);
        break;
      case Types.VARCHAR:
        setString(parameterIndex, castToString(in), getStringType());
        break;
      case Types.LONGVARCHAR:
        if (in instanceof InputStream) {
          preparedParameters.setText(parameterIndex, (InputStream)in);
        } else {
          setString(parameterIndex, castToString(in), getStringType());
        }
        break;
      case Types.DATE:
        if (in instanceof java.sql.Date) {
          setDate(parameterIndex, (java.sql.Date) in);
        } else {
          java.sql.Date tmpd;
          if (in instanceof java.util.Date) {
            tmpd = new java.sql.Date(((java.util.Date) in).getTime());
            //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
          } else if (in instanceof LocalDate) {
            setDate(parameterIndex, (LocalDate) in);
            break;
            //JCP! endif
          } else {
            // Fall back to parsing the value's string form as a date.
            tmpd = connection.getTimestampUtils().toDate(getDefaultCalendar(), in.toString());
          }
          setDate(parameterIndex, tmpd);
        }
        break;
      case Types.TIME:
        if (in instanceof java.sql.Time) {
          setTime(parameterIndex, (java.sql.Time) in);
        } else {
          java.sql.Time tmpt;
          if (in instanceof java.util.Date) {
            tmpt = new java.sql.Time(((java.util.Date) in).getTime());
            //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
          } else if (in instanceof LocalTime) {
            setTime(parameterIndex, (LocalTime) in);
            break;
            //JCP! endif
          } else {
            tmpt = connection.getTimestampUtils().toTime(getDefaultCalendar(), in.toString());
          }
          setTime(parameterIndex, tmpt);
        }
        break;
      case Types.TIMESTAMP:
        if (in instanceof RedshiftTimestamp) {
          setObject(parameterIndex, in);
        } else if (in instanceof java.sql.Timestamp) {
          setTimestamp(parameterIndex, (java.sql.Timestamp) in);
        } else {
          java.sql.Timestamp tmpts;
          if (in instanceof java.util.Date) {
            tmpts = new java.sql.Timestamp(((java.util.Date) in).getTime());
            //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
          } else if (in instanceof LocalDateTime) {
            setTimestamp(parameterIndex, (LocalDateTime) in);
            break;
            //JCP! endif
          } else {
            tmpts = connection.getTimestampUtils().toTimestamp(getDefaultCalendar(), in.toString());
          }
          setTimestamp(parameterIndex, tmpts);
        }
        break;
      //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
      case Types.TIMESTAMP_WITH_TIMEZONE:
        if (in instanceof OffsetDateTime) {
          setTimestamp(parameterIndex, (OffsetDateTime) in);
        } else if (in instanceof RedshiftTimestamp) {
          setObject(parameterIndex, in);
        } else {
          throw new RedshiftException(
              GT.tr("Cannot cast an instance of {0} to type {1}",
                  in.getClass().getName(), "Types.TIMESTAMP_WITH_TIMEZONE"),
              RedshiftState.INVALID_PARAMETER_TYPE);
        }
        break;
      //JCP! endif
      case Types.BOOLEAN:
      case Types.BIT:
        setBoolean(parameterIndex, BooleanTypeUtil.castToBoolean(in));
        break;
      case Types.BINARY:
      case Types.VARBINARY:
      case Types.LONGVARBINARY:
        // Binary targets are handled by the untyped setObject() overload.
        setObject(parameterIndex, in);
        break;
      case Types.BLOB:
        if (in instanceof Blob) {
          setBlob(parameterIndex, (Blob) in);
        } else if (in instanceof InputStream) {
          long oid = createBlob(parameterIndex, (InputStream) in, -1);
          setLong(parameterIndex, oid);
        } else {
          throw new RedshiftException(
              GT.tr("Cannot cast an instance of {0} to type {1}",
                  in.getClass().getName(), "Types.BLOB"),
              RedshiftState.INVALID_PARAMETER_TYPE);
        }
        break;
      case Types.CLOB:
        if (in instanceof Clob) {
          setClob(parameterIndex, (Clob) in);
        } else {
          throw new RedshiftException(
              GT.tr("Cannot cast an instance of {0} to type {1}",
                  in.getClass().getName(), "Types.CLOB"),
              RedshiftState.INVALID_PARAMETER_TYPE);
        }
        break;
      case Types.ARRAY:
        if (in instanceof Array) {
          setArray(parameterIndex, (Array) in);
        } else if (PrimitiveArraySupport.isSupportedPrimitiveArray(in)) {
          setPrimitiveArray(parameterIndex, in);
        } else {
          throw new RedshiftException(
              GT.tr("Cannot cast an instance of {0} to type {1}",
                  in.getClass().getName(), "Types.ARRAY"),
              RedshiftState.INVALID_PARAMETER_TYPE);
        }
        break;
      case Types.DISTINCT:
        bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED);
        break;
      case Types.OTHER:
        if (in instanceof RedshiftObject) {
          setRedshiftObject(parameterIndex, (RedshiftObject) in);
        } else if (in instanceof Map) {
          setMap(parameterIndex, (Map) in);
        } else {
          bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED);
        }
        break;
      default:
        throw new RedshiftException(GT.tr("Unsupported Types value: {0}", targetSqlType),
            RedshiftState.INVALID_PARAMETER_TYPE);
    }
  }

  /**
   * Binds a supported primitive Java array (e.g. {@code int[]}) as a SQL array.
   *
   * <p>Fix: restores the generic type parameter {@code <A>} that was lost
   * (the declaration referenced an undeclared type {@code A} and used the raw
   * {@code PrimitiveArraySupport} type).
   *
   * @param parameterIndex the first parameter is 1, the second is 2, ...
   * @param in a primitive array accepted by PrimitiveArraySupport
   * @throws SQLException if binding fails
   */
  private <A> void setPrimitiveArray(int parameterIndex, A in) throws SQLException {
    final PrimitiveArraySupport<A> arrayToString = PrimitiveArraySupport.getArraySupport(in);

    final TypeInfo typeInfo = connection.getTypeInfo();

    final int oid = arrayToString.getDefaultArrayTypeOid(typeInfo);

    if (arrayToString.supportBinaryRepresentation() && connection.getPreferQueryMode() != PreferQueryMode.SIMPLE) {
      // Binary form is preferred when the protocol mode allows it.
      bindBytes(parameterIndex, arrayToString.toBinaryRepresentation(connection, in), oid);
    } else {
      final char delim = typeInfo.getArrayDelimiter(oid);
      setString(parameterIndex, arrayToString.toArrayString(delim, in), oid);
    }
  }

  // Extracts the full contents of a Clob as a String. Clob positions are
  // 1-based; note the length is narrowed to int, so Clobs longer than
  // Integer.MAX_VALUE would be truncated.
  private static String asString(final Clob in) throws SQLException {
    return in.getSubString(1, (int) in.length());
  }

  /**
   * Best-effort conversion of an arbitrary object to an int. Accepts numeric
   * strings, Number, java.util.Date (epoch millis, truncated), Boolean (1/0),
   * Clob contents, and single characters.
   *
   * @throws SQLException if the value cannot be converted
   */
  private static int castToInt(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Integer.parseInt((String) in);
      } else if (in instanceof Number) {
        return ((Number) in).intValue();
      } else if (in instanceof java.util.Date) {
        // Epoch milliseconds, narrowed to int.
        return (int) ((java.util.Date) in).getTime();
      } else if (in instanceof Boolean) {
        return ((Boolean) in) ? 1 : 0;
      } else if (in instanceof Clob) {
        return Integer.parseInt(asString((Clob) in));
      } else if (in instanceof Character) {
        return Integer.parseInt(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "int", e);
    }
    throw cannotCastException(in.getClass().getName(), "int");
  }

  /**
   * Best-effort conversion of an arbitrary object to a short. Accepts numeric
   * strings, Number, java.util.Date (epoch millis, truncated), Boolean (1/0),
   * Clob contents, and single characters.
   *
   * @throws SQLException if the value cannot be converted
   */
  private static short castToShort(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Short.parseShort((String) in);
      } else if (in instanceof Number) {
        return ((Number) in).shortValue();
      } else if (in instanceof java.util.Date) {
        // Epoch milliseconds, narrowed to short.
        return (short) ((java.util.Date) in).getTime();
      } else if (in instanceof Boolean) {
        return ((Boolean) in) ? (short) 1 : (short) 0;
      } else if (in instanceof Clob) {
        return Short.parseShort(asString((Clob) in));
      } else if (in instanceof Character) {
        return Short.parseShort(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "short", e);
    }
    throw cannotCastException(in.getClass().getName(), "short");
  }

  /**
   * Best-effort coercion of an arbitrary parameter value to {@code long}.
   * Dates contribute their millisecond epoch value without narrowing.
   *
   * @param in non-null value to convert
   * @return the long representation of {@code in}
   * @throws SQLException if the runtime type is unsupported or parsing fails
   */
  private static long castToLong(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Long.parseLong((String) in);
      } else if (in instanceof Number) {
        return ((Number) in).longValue();
      } else if (in instanceof java.util.Date) {
        return ((java.util.Date) in).getTime();
      } else if (in instanceof Boolean) {
        return ((Boolean) in).booleanValue() ? 1L : 0L;
      } else if (in instanceof Clob) {
        return Long.parseLong(asString((Clob) in));
      } else if (in instanceof Character) {
        return Long.parseLong(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "long", e);
    }
    throw cannotCastException(in.getClass().getName(), "long");
  }

  /**
   * Best-effort coercion of an arbitrary parameter value to {@code float}.
   *
   * @param in non-null value to convert
   * @return the float representation of {@code in}
   * @throws SQLException if the runtime type is unsupported or parsing fails
   */
  private static float castToFloat(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Float.parseFloat((String) in);
      } else if (in instanceof Number) {
        return ((Number) in).floatValue();
      } else if (in instanceof java.util.Date) {
        // implicit long -> float widening of the epoch millis
        return ((java.util.Date) in).getTime();
      } else if (in instanceof Boolean) {
        return ((Boolean) in).booleanValue() ? 1f : 0f;
      } else if (in instanceof Clob) {
        return Float.parseFloat(asString((Clob) in));
      } else if (in instanceof Character) {
        return Float.parseFloat(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "float", e);
    }
    throw cannotCastException(in.getClass().getName(), "float");
  }

  /**
   * Best-effort coercion of an arbitrary parameter value to {@code double}.
   *
   * @param in non-null value to convert
   * @return the double representation of {@code in}
   * @throws SQLException if the runtime type is unsupported or parsing fails
   */
  private static double castToDouble(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return Double.parseDouble((String) in);
      } else if (in instanceof Number) {
        return ((Number) in).doubleValue();
      } else if (in instanceof java.util.Date) {
        // implicit long -> double widening of the epoch millis
        return ((java.util.Date) in).getTime();
      } else if (in instanceof Boolean) {
        return ((Boolean) in).booleanValue() ? 1d : 0d;
      } else if (in instanceof Clob) {
        return Double.parseDouble(asString((Clob) in));
      } else if (in instanceof Character) {
        return Double.parseDouble(in.toString());
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "double", e);
    }
    throw cannotCastException(in.getClass().getName(), "double");
  }

  /**
   * Best-effort coercion of an arbitrary parameter value to {@link BigDecimal}.
   * Integral Numbers go through {@code longValue()}, floating-point ones through
   * {@code doubleValue()}; Dates contribute their epoch millis.
   *
   * @param in non-null value to convert
   * @param scale if non-negative, the result is rescaled with HALF_UP rounding
   * @return the BigDecimal representation of {@code in}
   * @throws SQLException if the runtime type is unsupported or parsing fails
   */
  private static BigDecimal castToBigDecimal(final Object in, final int scale) throws SQLException {
    try {
      final BigDecimal value;
      if (in instanceof String) {
        value = new BigDecimal((String) in);
      } else if (in instanceof BigDecimal) {
        value = (BigDecimal) in;
      } else if (in instanceof BigInteger) {
        value = new BigDecimal((BigInteger) in);
      } else if (in instanceof Long || in instanceof Integer || in instanceof Short
          || in instanceof Byte) {
        value = BigDecimal.valueOf(((Number) in).longValue());
      } else if (in instanceof Double || in instanceof Float) {
        value = BigDecimal.valueOf(((Number) in).doubleValue());
      } else if (in instanceof java.util.Date) {
        value = BigDecimal.valueOf(((java.util.Date) in).getTime());
      } else if (in instanceof Boolean) {
        value = ((Boolean) in).booleanValue() ? BigDecimal.ONE : BigDecimal.ZERO;
      } else if (in instanceof Clob) {
        value = new BigDecimal(asString((Clob) in));
      } else if (in instanceof Character) {
        value = new BigDecimal(new char[]{(Character) in});
      } else {
        value = null;
      }
      if (value != null) {
        return scale >= 0 ? value.setScale(scale, RoundingMode.HALF_UP) : value;
      }
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "BigDecimal", e);
    }
    throw cannotCastException(in.getClass().getName(), "BigDecimal");
  }

  /**
   * Coerces an arbitrary parameter value to String. Clobs are fully
   * materialized; any other type falls back to {@code toString()}.
   *
   * @param in non-null value to convert
   * @return the String representation of {@code in}
   * @throws SQLException if reading a Clob fails
   */
  private static String castToString(final Object in) throws SQLException {
    try {
      if (in instanceof String) {
        return (String) in;
      }
      return (in instanceof Clob) ? asString((Clob) in) : in.toString();
    } catch (final Exception e) {
      throw cannotCastException(in.getClass().getName(), "String", e);
    }
  }

  /** Convenience overload for a conversion failure without an underlying cause. */
  private static RedshiftException cannotCastException(final String fromType, final String toType) {
    final Exception noCause = null;
    return cannotCastException(fromType, toType, noCause);
  }

  /**
   * Builds the standard "cannot convert" exception for the castToXxx helpers.
   *
   * @param fromType source class name
   * @param toType target type label (e.g. "int", "BigDecimal")
   * @param cause underlying failure, may be null
   * @return exception with state INVALID_PARAMETER_TYPE
   */
  private static RedshiftException cannotCastException(final String fromType, final String toType,
      final Exception cause) {
    final String message = GT.tr("Cannot convert an instance of {0} to type {1}", fromType, toType);
    return new RedshiftException(message, RedshiftState.INVALID_PARAMETER_TYPE, cause);
  }

  /** Binds {@code x} as the given SQL type with no scale/length hint. */
  public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, x, targetSqlType);
    }
    // -1 signals "no scale or length supplied".
    setObject(parameterIndex, x, targetSqlType, -1);
  }

  /**
   * Stores an Object into a parameter, inferring the SQL type from the Java
   * runtime class of {@code x}. A null value is bound as type OTHER. If no
   * mapping exists, an exception is thrown directing the caller to use
   * setObject with an explicit Types value.
   *
   * <p>The instanceof chain below is order-sensitive: driver-specific types
   * (RedshiftInterval*, RedshiftObject) and exact wrapper classes are tested
   * before the broad {@code Number} and {@code Map} fallbacks near the end.
   */
  public void setObject(int parameterIndex, Object x) throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true, parameterIndex, x);
  	
    checkClosed();
    if (x == null) {
      setNull(parameterIndex, Types.OTHER);
/*    } else if (x instanceof UUID && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
      setUuid(parameterIndex, (UUID) x); */
    } else if (x instanceof SQLXML) {
      setSQLXML(parameterIndex, (SQLXML) x);
    } else if (x instanceof String) {
      setString(parameterIndex, (String) x);
    } else if (x instanceof BigDecimal) {
      setBigDecimal(parameterIndex, (BigDecimal) x);
    } else if (x instanceof Short) {
      setShort(parameterIndex, (Short) x);
    } else if (x instanceof Integer) {
      setInt(parameterIndex, (Integer) x);
    } else if (x instanceof Long) {
      setLong(parameterIndex, (Long) x);
    } else if (x instanceof Float) {
      setFloat(parameterIndex, (Float) x);
    } else if (x instanceof Double) {
      setDouble(parameterIndex, (Double) x);
    } else if (x instanceof byte[]) {
      setBytes(parameterIndex, (byte[]) x);
    } else if (x instanceof ByteStreamWriter) {
      setByteStreamWriter(parameterIndex, (ByteStreamWriter) x);
    } else if (x instanceof java.sql.Date) {
      setDate(parameterIndex, (java.sql.Date) x);
    } else if (x instanceof Time) {
      setTime(parameterIndex, (Time) x);
    } else if (x instanceof Timestamp) {
      setTimestamp(parameterIndex, (Timestamp) x);
    } else if (x instanceof RedshiftIntervalYearToMonth) {
      setIntervalYearToMonth(parameterIndex, (RedshiftIntervalYearToMonth) x);
    } else if (x instanceof RedshiftIntervalDayToSecond) {
      setIntervalDayToSecond(parameterIndex, (RedshiftIntervalDayToSecond) x);
    } else if (x instanceof Boolean) {
      setBoolean(parameterIndex, (Boolean) x);
    } else if (x instanceof Byte) {
      setByte(parameterIndex, (Byte) x);
    } else if (x instanceof Blob) {
      setBlob(parameterIndex, (Blob) x);
    } else if (x instanceof Clob) {
      setClob(parameterIndex, (Clob) x);
    } else if (x instanceof Array) {
      setArray(parameterIndex, (Array) x);
    } else if (x instanceof RedshiftObject) {
      setRedshiftObject(parameterIndex, (RedshiftObject) x);
    } else if (x instanceof Character) {
      setString(parameterIndex, ((Character) x).toString());
      //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
    } else if (x instanceof LocalDate) {
      setDate(parameterIndex, (LocalDate) x);
    } else if (x instanceof LocalTime) {
      setTime(parameterIndex, (LocalTime) x);
    } else if (x instanceof LocalDateTime) {
      setTimestamp(parameterIndex, (LocalDateTime) x);
    } else if (x instanceof OffsetDateTime) {
      setTimestamp(parameterIndex, (OffsetDateTime) x);
      //JCP! endif
    } else if (x instanceof Map) {
      setMap(parameterIndex, (Map) x);
    } else if (x instanceof Number) {
      // Catch-all for remaining Number subclasses (e.g. BigInteger).
      setNumber(parameterIndex, (Number) x);
    } else if (PrimitiveArraySupport.isSupportedPrimitiveArray(x)) {
      setPrimitiveArray(parameterIndex, x);
    } else {
      // Can't infer a type.
      throw new RedshiftException(GT.tr(
          "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.",
          x.getClass().getName()), RedshiftState.INVALID_PARAMETER_TYPE);
    }
  }

  /**
   * Returns the SQL statement with the current template values substituted.
   *
   * @return SQL statement with the current template values substituted
   */
  public String toString() {
    return (preparedQuery != null)
        ? preparedQuery.query.toString(preparedParameters)
        : super.toString();
  }

  /**
   * Binds a value that will be inlined into the SQL text as a literal.
   *
   * <p>Note if s is a String it should be escaped by the caller to avoid SQL injection attacks. It
   * is not done here for efficiency reasons as most calls to this method do not require escaping as
   * the source of the string is known safe (i.e. {@code Integer.toString()})
   *
   * @param paramIndex parameter index
   * @param s value (the value should already be escaped)
   * @param oid type oid
   * @throws SQLException if something goes wrong
   */
  protected void bindLiteral(int paramIndex, String s, int oid) throws SQLException {
    preparedParameters.setLiteralParameter(paramIndex, s, oid);
  }

  /**
   * Binds a parameter using its binary wire representation.
   *
   * @param paramIndex parameter index
   * @param b raw bytes, already encoded for the given type oid
   * @param oid type oid
   * @throws SQLException if something goes wrong
   */
  protected void bindBytes(int paramIndex, byte[] b, int oid) throws SQLException {
    preparedParameters.setBinaryParameter(paramIndex, b, oid);
  }

  /**
   * This version is for values that should turn into strings e.g. setString directly calls
   * bindString with no escaping; the per-protocol ParameterList does escaping as needed.
   *
   * @param paramIndex parameter index
   * @param s value
   * @param oid type oid
   * @throws SQLException if something goes wrong
   */
  private void bindString(int paramIndex, String s, int oid) throws SQLException {
    preparedParameters.setStringParameter(paramIndex, s, oid);
  }

  /**
   * Whether the next execution should use a server-side prepared statement:
   * true once the execute count is about to reach the configured threshold
   * (a threshold of 0 disables server preparation).
   */
  @Override
  public boolean isUseServerPrepare() {
    if (preparedQuery == null || mPrepareThreshold == 0) {
      return false;
    }
    return preparedQuery.getExecuteCount() + 1 >= mPrepareThreshold;
  }

  /**
   * String-based batching is illegal on a PreparedStatement, which is bound to
   * its original SQL; always throws after the closed-state check.
   */
  @Override
  public void addBatch(String sql) throws SQLException {
    checkClosed();
    throw new RedshiftException(
        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
        RedshiftState.WRONG_OBJECT_TYPE);
  }

  /**
   * Queues the current parameter bindings as one batch entry. The parameters
   * are deep-copied so later setXxx calls cannot mutate queued entries.
   */
  @Override
  public void addBatch() throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true);
    }
    checkClosed();
    if (batchStatements == null) {
      batchStatements = new ArrayList();
      batchParameters = new ArrayList();
    }
    // we need to create copies of our parameters, otherwise the values can be changed
    batchParameters.add(preparedParameters.copy());
    final Query query = preparedQuery.query;
    // A multi-valued (BatchedQuery) statement is recorded only once; plain
    // queries are recorded once per addBatch call.
    if (batchStatements.isEmpty() || !(query instanceof BatchedQuery)) {
      batchStatements.add(query);
    }
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false);
    }
  }

  /**
   * Returns result-set metadata for this statement. If no (open) result set is
   * available, performs a describe-only round trip: the full query is sent to
   * the backend but not executed, which yields column metadata.
   *
   * @return metadata for the statement's result columns, or null if none
   * @throws SQLException if the statement is closed or the describe fails
   */
  public ResultSetMetaData getMetaData() throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true);
  	
    checkClosed();
    ResultSet rs = getResultSet();

    if (rs == null || ((RedshiftResultSet) rs).isResultSetClosed()) {
      // OK, we haven't executed it yet, or it was closed
      // we've got to go to the backend
      // for more info. We send the full query, but just don't
      // execute it.

      int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
          | QueryExecutor.QUERY_SUPPRESS_BEGIN;
      StatementResultHandler handler = new StatementResultHandler(this);
      connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
          flags);
      ResultWrapper wrapper = handler.getResults();
      if (wrapper != null) {
      	// Keep reference to close the result
      	firstUnclosedResult = wrapper;
        rs = wrapper.getResultSet();
      }
      
      // Describe only execution is done; move the statement back to idle.
			handler.setStatementStateIdleFromInQuery();
    }

    ResultSetMetaData rc;
    
    if (rs != null) {
      rc = rs.getMetaData();
    }
    else
    	rc = null;

    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(false, rc);
    
    return rc;
  }

  /**
   * Binds a SQL ARRAY parameter. The element type name is resolved to a
   * server array OID; unknown types are rejected. The driver's own
   * RedshiftArray may be sent in binary form, all others via their
   * string literal.
   */
  public void setArray(int i, java.sql.Array x) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, i, x);
    }
    checkClosed();

    if (x == null) {
      setNull(i, Types.ARRAY);
      return;
    }

    // This only works for Array implementations that return a valid array
    // literal from Array.toString(), such as the implementation we return
    // from ResultSet.getArray(). Eventually we need a proper implementation
    // here that works for any Array implementation.
    final String typename = x.getBaseTypeName();
    final int oid = connection.getTypeInfo().getRSArrayType(typename);
    if (oid == Oid.UNSPECIFIED) {
      throw new RedshiftException(GT.tr("Unknown type {0}.", typename),
          RedshiftState.INVALID_PARAMETER_TYPE);
    }

    // Our own array implementation may already carry a binary wire encoding.
    if (x instanceof RedshiftArray && ((RedshiftArray) x).isBinary()) {
      bindBytes(i, ((RedshiftArray) x).toBytes(), oid);
      return;
    }

    setString(i, x.toString(), oid);
  }

  /**
   * Copies bytes from {@code inputStream} into a newly created large object
   * and returns its OID.
   *
   * @param i parameter index (unused here; kept for signature compatibility)
   * @param inputStream source of the blob bytes
   * @param length number of bytes to copy; if not positive, the stream is
   *        copied until EOF
   * @return the OID of the newly created large object
   * @throws SQLException if large-object creation or the copy fails
   */
  protected long createBlob(int i, InputStream inputStream, long length) throws SQLException {
    LargeObjectManager lom = connection.getLargeObjectAPI();
    long oid = lom.createLO();
    LargeObject lob = lom.open(oid);
    OutputStream outputStream = lob.getOutputStream();
    byte[] buf = new byte[4096];
    try {
      long remaining;
      if (length > 0) {
        remaining = length;
      } else {
        // No explicit length: effectively unbounded, read until EOF.
        remaining = Long.MAX_VALUE;
      }
      // Never request more than the bytes still owed when a length was given.
      int numRead = inputStream.read(buf, 0,
          (length > 0 && remaining < buf.length ? (int) remaining : buf.length));
      while (numRead != -1 && remaining > 0) {
        remaining -= numRead;
        outputStream.write(buf, 0, numRead);
        numRead = inputStream.read(buf, 0,
            (length > 0 && remaining < buf.length ? (int) remaining : buf.length));
      }
    } catch (IOException se) {
      // NOTE(review): on failure the already-created large object is left
      // orphaned on the server — confirm whether callers clean it up.
      throw new RedshiftException(GT.tr("Unexpected error writing large object to database."),
          RedshiftState.UNEXPECTED_ERROR, se);
    } finally {
      try {
        outputStream.close();
      } catch (Exception e) {
      }
    }
    return oid;
  }

  /**
   * Binds a Blob parameter by copying its bytes into a server-side large
   * object and binding that object's OID. The blob's stream is always closed.
   */
  public void setBlob(int i, Blob x) throws SQLException {
    checkClosed();

    if (x == null) {
      setNull(i, Types.BLOB);
      return;
    }

    final InputStream inStream = x.getBinaryStream();
    try {
      setLong(i, createBlob(i, inStream, x.length()));
    } finally {
      try {
        inStream.close();
      } catch (Exception ignored) {
        // best-effort close
      }
    }
  }

  /**
   * Drains up to {@code maxLength} characters from the reader into a String.
   *
   * @param value source reader
   * @param maxLength maximum number of characters to read
   * @return the characters read, possibly fewer than maxLength if EOF is hit
   * @throws SQLException wrapping any IOException from the reader
   */
  private String readerToString(Reader value, int maxLength) throws SQLException {
    try {
      final int bufferSize = Math.min(maxLength, 1024);
      final char[] chunk = new char[bufferSize];
      final StringBuilder out = new StringBuilder(bufferSize);
      while (out.length() < maxLength) {
        final int nRead = value.read(chunk, 0, Math.min(bufferSize, maxLength - out.length()));
        if (nRead < 0) {
          break; // EOF
        }
        if (nRead > 0) {
          out.append(chunk, 0, nRead);
        }
      }
      return out.toString();
    } catch (IOException ioe) {
      throw new RedshiftException(GT.tr("Provided Reader failed."), RedshiftState.UNEXPECTED_ERROR, ioe);
    }
  }

  /**
   * Binds a character stream by draining it into memory and binding the result
   * as an ordinary string parameter (there is no way to stream text to the
   * server, and all text types can hold very large values).
   *
   * @param i parameter index
   * @param x source reader; null binds SQL NULL as VARCHAR
   * @param length number of characters to read; must be non-negative
   * @throws SQLException if the statement is closed or length is negative
   */
  public void setCharacterStream(int i, java.io.Reader x, int length) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, i, x, length);
    }
    checkClosed();

    if (x == null) {
      setNull(i, Types.VARCHAR);
      return;
    }
    if (length < 0) {
      throw new RedshiftException(GT.tr("Invalid stream length {0}.", length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }

    setString(i, readerToString(x, length));
  }

  /**
   * Binds a Clob parameter by copying its characters into a server-side large
   * object (encoded in the connection charset) and binding that object's OID.
   *
   * @param i parameter index
   * @param x source Clob; null binds SQL NULL as CLOB
   * @throws SQLException if the copy to the large object fails
   */
  @Override
  public void setClob(int i, Clob x) throws SQLException {
    checkClosed();

    if (x == null) {
      setNull(i, Types.CLOB);
      return;
    }

    Reader inStream = x.getCharacterStream();
    int length = (int) x.length();
    LargeObjectManager lom = connection.getLargeObjectAPI();
    long oid = lom.createLO();
    LargeObject lob = lom.open(oid);
    Charset connectionCharset = Charset.forName(connection.getEncoding().name());
    OutputStream los = lob.getOutputStream();
    Writer lw = new OutputStreamWriter(los, connectionCharset);
    boolean closed = false;
    try {
      // could be buffered, but then the OutputStream returned by LargeObject
      // is buffered internally anyhow, so there would be no performance
      // boost gained, if anything it would be worse!
      int c = inStream.read();
      int p = 0;
      while (c > -1 && p < length) {
        lw.write(c);
        c = inStream.read();
        p++;
      }
      lw.close();
      closed = true;
    } catch (IOException se) {
      throw new RedshiftException(GT.tr("Unexpected error writing large object to database."),
          RedshiftState.UNEXPECTED_ERROR, se);
    } finally {
      // BUGFIX: previously the writer (and the large object stream beneath it)
      // leaked when the copy failed; close it quietly if the happy path didn't.
      if (!closed) {
        try {
          lw.close();
        } catch (Exception ignored) {
          // already propagating the original failure
        }
      }
    }
    // lob is closed by the stream so don't call lob.close()
    setLong(i, oid);
  }

  /**
   * Binds a typed SQL NULL. When a type name is supplied it is resolved to a
   * server OID; otherwise this degenerates to the two-argument setNull.
   */
  public void setNull(int parameterIndex, int t, String typeName) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, t, typeName);
    }
    if (typeName == null) {
      setNull(parameterIndex, t);
      return;
    }

    checkClosed();

    final int oid = connection.getTypeInfo().getRSType(typeName);
    preparedParameters.setNull(parameterIndex, oid);
  }

  /** REF parameters are not supported by this driver; always throws. */
  public void setRef(int i, Ref x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setRef(int,Ref)");
  }

  /**
   * Binds a DATE parameter. Uses the binary wire format when the connection
   * allows it; otherwise sends a string rendered in the given calendar's
   * zone with type UNSPECIFIED (see the rotation rationale below).
   *
   * @param i parameter index
   * @param d date value; null binds SQL NULL as DATE
   * @param cal calendar supplying the time zone; defaults to the shared one
   * @throws SQLException if the statement is closed or binding fails
   */
  public void setDate(int i, java.sql.Date d, java.util.Calendar cal) throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true, i, d, cal);
  	
    checkClosed();

    if (d == null) {
      setNull(i, Types.DATE);
      return;
    }

    if (connection.binaryTransferSend(Oid.DATE)) {
      // Binary DATE is a 4-byte value; encode using the calendar's zone.
      byte[] val = new byte[4];
      TimeZone tz = cal != null ? cal.getTimeZone() : null;
      connection.getTimestampUtils().toBinDate(tz, val, d);
      preparedParameters.setBinaryParameter(i, val, Oid.DATE);
      return;
    }

    // We must use UNSPECIFIED here, or inserting a Date-with-timezone into a
    // timestamptz field does an unexpected rotation by the server's TimeZone:
    //
    // We want to interpret 2005/01/01 with calendar +0100 as
    // "local midnight in +0100", but if we go via date it interprets it
    // as local midnight in the server's timezone:

    // template1=# select '2005-01-01+0100'::timestamptz;
    // timestamptz
    // ------------------------
    // 2005-01-01 02:00:00+03
    // (1 row)

    // template1=# select '2005-01-01+0100'::date::timestamptz;
    // timestamptz
    // ------------------------
    // 2005-01-01 00:00:00+03
    // (1 row)

    if (cal == null) {
      cal = getDefaultCalendar();
    }
    bindString(i, connection.getTimestampUtils().toString(cal, d), Oid.UNSPECIFIED);
  }

  /**
   * Binds a TIME parameter as a string. By default the type is left
   * UNSPECIFIED for the server to infer; a RedshiftTime value pins the OID
   * to TIME or (when it carries its own calendar) TIMETZ.
   */
  public void setTime(int i, Time t, java.util.Calendar cal) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, i, t, cal);
    }
    checkClosed();

    if (t == null) {
      setNull(i, Types.TIME);
      return;
    }

    int oid = Oid.UNSPECIFIED;
    if (t instanceof RedshiftTime) {
      final RedshiftTime rsTime = (RedshiftTime) t;
      if (rsTime.getCalendar() != null) {
        // Value carries its own zone: send as TIMETZ rendered in that zone.
        oid = Oid.TIMETZ;
        cal = rsTime.getCalendar();
      } else {
        oid = Oid.TIME;
      }
    }

    if (cal == null) {
      cal = getDefaultCalendar();
    }
    bindString(i, connection.getTimestampUtils().toString(cal, t), oid);
  }

  /**
   * Binds a TIMESTAMP parameter as a string. By default the type is left
   * UNSPECIFIED (see the rationale below); a RedshiftTimestamp value pins the
   * OID to TIMESTAMP or (when it carries its own calendar) TIMESTAMPTZ.
   *
   * @param i parameter index
   * @param t timestamp value; null binds SQL NULL as TIMESTAMP
   * @param cal calendar supplying the time zone; defaults to the shared one
   * @throws SQLException if the statement is closed or binding fails
   */
  public void setTimestamp(int i, Timestamp t, java.util.Calendar cal) throws SQLException {
    if (RedshiftLogger.isEnable())
    	connection.getLogger().logFunction(true, i, t, cal);

    checkClosed();

    if (t == null) {
      setNull(i, Types.TIMESTAMP);
      return;
    }

    int oid = Oid.UNSPECIFIED;

    // Use UNSPECIFIED as a compromise to get both TIMESTAMP and TIMESTAMPTZ working.
    // This is because you get this in a +1300 timezone:
    //
    // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz;
    // timestamptz
    // ------------------------
    // 2005-01-01 18:00:00+13
    // (1 row)

    // template1=# select '2005-01-01 15:00:00 +1000'::timestamp;
    // timestamp
    // ---------------------
    // 2005-01-01 15:00:00
    // (1 row)

    // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz::timestamp;
    // timestamp
    // ---------------------
    // 2005-01-01 18:00:00
    // (1 row)

    // So we want to avoid doing a timestamptz -> timestamp conversion, as that
    // will first convert the timestamptz to an equivalent time in the server's
    // timezone (+1300, above), then turn it into a timestamp with the "wrong"
    // time compared to the string we originally provided. But going straight
    // to timestamp is OK as the input parser for timestamp just throws away
    // the timezone part entirely. Since we don't know ahead of time what type
    // we're actually dealing with, UNSPECIFIED seems the lesser evil, even if it
    // does give more scope for type-mismatch errors being silently hidden.

    // If a RedshiftTimestamp is used, we can define the OID explicitly.
    if (t instanceof RedshiftTimestamp) {
      RedshiftTimestamp rsTimestamp = (RedshiftTimestamp) t;
      if (rsTimestamp.getCalendar() == null) {
        oid = Oid.TIMESTAMP;
      } else {
        oid = Oid.TIMESTAMPTZ;
        cal = rsTimestamp.getCalendar();
      }
    }
    if (cal == null) {
      cal = getDefaultCalendar();
    }
    bindString(i, connection.getTimestampUtils().toString(cal, t), oid);
  }

  //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
  /** Binds a java.time.LocalDate as a DATE parameter (string form). */
  private void setDate(int i, LocalDate localDate) throws SQLException {
    bindString(i, connection.getTimestampUtils().toString(localDate), Oid.DATE);
  }

  /** Binds a java.time.LocalTime as a TIME parameter (string form). */
  private void setTime(int i, LocalTime localTime) throws SQLException {
    bindString(i, connection.getTimestampUtils().toString(localTime), Oid.TIME);
  }

  /** Binds a java.time.LocalDateTime as a TIMESTAMP parameter (string form). */
  private void setTimestamp(int i, LocalDateTime localDateTime) throws SQLException {
    bindString(i, connection.getTimestampUtils().toString(localDateTime), Oid.TIMESTAMP);
  }

  /** Binds a java.time.OffsetDateTime as a TIMESTAMPTZ parameter (string form). */
  private void setTimestamp(int i, OffsetDateTime offsetDateTime) throws SQLException {
    bindString(i, connection.getTimestampUtils().toString(offsetDateTime), Oid.TIMESTAMPTZ);
  }
  //JCP! endif

  /**
   * Factory for the driver's ParameterMetaData implementation.
   *
   * @param conn connection the metadata belongs to
   * @param oids resolved parameter type OIDs, one per placeholder
   * @return a new RedshiftParameterMetaData wrapping the given OIDs
   * @throws SQLException if something goes wrong
   */
  public ParameterMetaData createParameterMetaData(BaseConnection conn, int[] oids)
      throws SQLException {
    return new RedshiftParameterMetaData(conn, oids);
  }

  //JCP! if mvn.project.property.redshift.jdbc.spec >= "JDBC4.2"
  /** JDBC 4.2 SQLType-based setObject is not supported; always throws. */
  public void setObject(int parameterIndex, Object x, java.sql.SQLType targetSqlType,
      int scaleOrLength) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setObject");
  }

  /** JDBC 4.2 SQLType-based setObject is not supported; always throws. */
  public void setObject(int parameterIndex, Object x, java.sql.SQLType targetSqlType)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setObject");
  }
  //JCP! endif

  /** ROWID parameters are not supported by this driver; always throws. */
  public void setRowId(int parameterIndex, RowId x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setRowId(int, RowId)");
  }

  /** NCHAR/NVARCHAR string binding is not supported; always throws. */
  public void setNString(int parameterIndex, String value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNString(int, String)");
  }

  /** National character streams are not supported; always throws. */
  public void setNCharacterStream(int parameterIndex, Reader value, long length)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader, long)");
  }

  /** National character streams are not supported; always throws. */
  public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader)");
  }

  /** long-length character streams are not supported; use the int-length overload. */
  public void setCharacterStream(int parameterIndex, Reader value, long length)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setCharacterStream(int, Reader, long)");
  }

  /**
   * Binds an unbounded character stream. In SIMPLE query mode the reader is
   * drained and bound as text (no streaming there); otherwise it is wrapped
   * as a byte stream and bound as LONGVARCHAR.
   */
  public void setCharacterStream(int parameterIndex, Reader value) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, value);
    }
    if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
      setString(parameterIndex, value == null ? null : readerToString(value, Integer.MAX_VALUE));
      return;
    }
    final InputStream is = (value == null) ? null : new ReaderInputStream(value);
    setObject(parameterIndex, is, Types.LONGVARCHAR);
  }

  /**
   * Binds a binary stream of exactly {@code length} bytes as a bytea parameter.
   *
   * @param parameterIndex parameter index
   * @param value binary stream to send as the parameter value
   * @param length number of bytes in the stream; must be in [0, Integer.MAX_VALUE]
   * @throws SQLException if the length is negative or exceeds the protocol limit
   */
  public void setBinaryStream(int parameterIndex, InputStream value, long length)
      throws SQLException {
    // BUGFIX: reject negative lengths explicitly (consistent with
    // setCharacterStream/setBlob) instead of silently casting to int.
    if (length < 0) {
      throw new RedshiftException(GT.tr("Invalid stream length {0}.", length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }
    if (length > Integer.MAX_VALUE) {
      throw new RedshiftException(GT.tr("Object is too large to send over the protocol."),
          RedshiftState.NUMERIC_CONSTANT_OUT_OF_RANGE);
    }
    preparedParameters.setBytea(parameterIndex, value, (int) length);
  }

  /**
   * Binds a bytea parameter of unknown length; the stream is read until EOF.
   *
   * @param parameterIndex parameter index
   * @param value binary stream to send as the parameter value
   * @throws SQLException if something goes wrong
   */
  public void setBinaryStream(int parameterIndex, InputStream value) throws SQLException {
    preparedParameters.setBytea(parameterIndex, value);
  }

  /** ASCII streams are not supported; always throws. */
  public void setAsciiStream(int parameterIndex, InputStream value, long length)
      throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream, long)");
  }

  /** ASCII streams are not supported; always throws. */
  public void setAsciiStream(int parameterIndex, InputStream value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream)");
  }

  /** NCLOB parameters are not supported; always throws. */
  public void setNClob(int parameterIndex, NClob value) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNClob(int, NClob)");
  }

  /** Reader-based CLOB binding is not supported; use setClob(int, Clob). */
  public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setClob(int, Reader, long)");
  }

  /** Reader-based CLOB binding is not supported; use setClob(int, Clob). */
  public void setClob(int parameterIndex, Reader reader) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setClob(int, Reader)");
  }

  /**
   * Binds a BLOB parameter from a stream of exactly {@code length} bytes:
   * the bytes are copied into a new large object whose OID is bound.
   */
  public void setBlob(int parameterIndex, InputStream inputStream, long length)
      throws SQLException {
    checkClosed();

    if (inputStream == null) {
      setNull(parameterIndex, Types.BLOB);
      return;
    }
    if (length < 0) {
      throw new RedshiftException(GT.tr("Invalid stream length {0}.", length),
          RedshiftState.INVALID_PARAMETER_VALUE);
    }

    setLong(parameterIndex, createBlob(parameterIndex, inputStream, length));
  }

  /**
   * Binds a BLOB parameter from a stream of unknown length: the stream is
   * copied until EOF into a new large object whose OID is bound.
   */
  public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
    checkClosed();

    if (inputStream == null) {
      setNull(parameterIndex, Types.BLOB);
      return;
    }

    // -1 tells createBlob to copy until end of stream.
    setLong(parameterIndex, createBlob(parameterIndex, inputStream, -1));
  }

  /** Reader-based NCLOB binding is not supported; always throws. */
  public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader, long)");
  }

  /** Reader-based NCLOB binding is not supported; always throws. */
  public void setNClob(int parameterIndex, Reader reader) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader)");
  }

  /**
   * Binds a SQLXML parameter by extracting its string form and sending it with
   * the XML type OID; a null object (or null content) binds SQL NULL.
   */
  public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true, parameterIndex, xmlObject);
    }
    checkClosed();
    final String stringValue = (xmlObject == null) ? null : xmlObject.getString();
    if (stringValue != null) {
      setString(parameterIndex, stringValue, Oid.XML);
    } else {
      setNull(parameterIndex, Types.SQLXML);
    }
  }

  /*
  private void setUuid(int parameterIndex, UUID uuid) throws SQLException {
    if (connection.binaryTransferSend(Oid.UUID)) {
      byte[] val = new byte[16];
      ByteConverter.int8(val, 0, uuid.getMostSignificantBits());
      ByteConverter.int8(val, 8, uuid.getLeastSignificantBits());
      bindBytes(parameterIndex, val, Oid.UUID);
    } else {
      bindLiteral(parameterIndex, uuid.toString(), Oid.UUID);
    }
  } */

  /** DATALINK/URL parameters are not supported by this driver; always throws. */
  public void setURL(int parameterIndex, java.net.URL x) throws SQLException {
    throw Driver.notImplemented(this.getClass(), "setURL(int,URL)");
  }

  /**
   * Executes the queued batch. Multi-entry batches are pushed over the
   * server-prepare threshold up front so they run as server-prepared
   * statements; the cached default time zone is always reset afterwards.
   */
  @Override
  public int[] executeBatch() throws SQLException {
    try {
      // Note: in batch prepared statements batchStatements == 1, and batchParameters is equal
      // to the number of addBatch calls; batchParameters might be empty for an empty batch.
      final boolean multiEntry = batchParameters != null && batchParameters.size() > 1;
      if (multiEntry && mPrepareThreshold > 0) {
        // Technically this might create a server-prepared statement for only
        // 2 executions even with prepareThreshold=5, but the threshold is just
        // an optimization knob, so that is acceptable.
        this.preparedQuery.increaseExecuteCount(mPrepareThreshold);
      }
      return super.executeBatch();
    } finally {
      defaultTimeZone = null;
    }
  }

  /**
   * Returns the shared calendar used for date/time binding, caching the
   * resolved default time zone when it cannot be obtained cheaply.
   */
  private Calendar getDefaultCalendar() {
    final TimestampUtils timestampUtils = connection.getTimestampUtils();
    if (timestampUtils.hasFastDefaultTimeZone()) {
      return timestampUtils.getSharedCalendar(null);
    }
    final Calendar sharedCalendar = timestampUtils.getSharedCalendar(defaultTimeZone);
    if (defaultTimeZone == null) {
      // Remember the zone so later binds in this execution reuse it.
      defaultTimeZone = sharedCalendar.getTimeZone();
    }
    return sharedCalendar;
  }

  /**
   * Returns parameter metadata by performing a describe-only round trip so the
   * server resolves the parameter type OIDs without executing the statement.
   */
  public ParameterMetaData getParameterMetaData() throws SQLException {
    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(true);
    }
    final int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
        | QueryExecutor.QUERY_SUPPRESS_BEGIN;
    final StatementResultHandler handler = new StatementResultHandler(this);
    connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
        flags);

    final int[] oids = preparedParameters.getTypeOIDs();
    final ParameterMetaData rc = (oids == null) ? null : createParameterMetaData(connection, oids);

    if (RedshiftLogger.isEnable()) {
      connection.getLogger().logFunction(false, rc);
    }
    return rc;
  }

  /**
   * Rewrites a queued batch of a multi-valued-capable (BatchedQuery) statement
   * into a smaller number of multi-row statements. Each derived statement
   * covers a power-of-two number of rows, capped both by the configured
   * rewrite size and by the protocol's bind-count limit.
   */
  @Override
  protected void transformQueriesAndParameters() throws SQLException {
    // Nothing to do for single-entry batches or non-rewritable queries.
    if (batchParameters.size() <= 1
        || !(preparedQuery.query instanceof BatchedQuery)) {
      return;
    }
    BatchedQuery originalQuery = (BatchedQuery) preparedQuery.query;
    // Single query cannot have more than {@link Short#MAX_VALUE} binds, thus
    // the number of multi-values blocks should be capped.
    // Typically, it does not make much sense to batch more than 128 rows: performance
    // does not improve much after updating 128 statements with 1 multi-valued one, thus
    // we cap maximum batch size and split there.
    final int bindCount = originalQuery.getBindCount();
    final int highestBlockCount = ((RedshiftConnectionImpl)connection).getReWriteBatchedInsertsSize(); // 128;
    final int maxValueBlocks = bindCount == 0 ? 1024 /* if no binds, use 1024 rows */
        : Integer.highestOneBit( // deriveForMultiBatch supports powers of two only
            Math.min(Math.max(1, (Short.MAX_VALUE - 1) / bindCount), highestBlockCount));
    int unprocessedBatchCount = batchParameters.size();
    // Full-size blocks, plus one block per set bit of the remainder: the
    // leftover rows are decomposed into power-of-two chunks since
    // deriveForMultiBatch only supports power-of-two row counts.
    final int fullValueBlocksCount = unprocessedBatchCount / maxValueBlocks;
    final int partialValueBlocksCount = Integer.bitCount(unprocessedBatchCount % maxValueBlocks);
    final int count = fullValueBlocksCount + partialValueBlocksCount;
    ArrayList newBatchStatements = new ArrayList(count);
    ArrayList newBatchParameters = new ArrayList(count);
    int offset = 0;
    for (int i = 0; i < count; i++) {
      int valueBlock;
      if (unprocessedBatchCount >= maxValueBlocks) {
        valueBlock = maxValueBlocks;
      } else {
        // Largest power of two not exceeding the rows still to place.
        valueBlock = Integer.highestOneBit(unprocessedBatchCount);
      }
      // Find appropriate batch for block count.
      BatchedQuery bq = originalQuery.deriveForMultiBatch(valueBlock, highestBlockCount, connection.getLogger());
      ParameterList newPl = bq.createParameterList();
      // Concatenate the per-row parameter lists into the multi-row list.
      for (int j = 0; j < valueBlock; j++) {
        ParameterList pl = batchParameters.get(offset++);
        newPl.appendAll(pl);
      }
      newBatchStatements.add(bq);
      newBatchParameters.add(newPl);
      unprocessedBatchCount -= valueBlock;
    }
    batchStatements = newBatchStatements;
    batchParameters = newBatchParameters;
  }
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy