/*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package org.postgresql.jdbc;
import static org.postgresql.util.internal.Nullness.castNonNull;
import org.postgresql.Driver;
import org.postgresql.PGRefCursorResultSet;
import org.postgresql.PGResultSetMetaData;
import org.postgresql.core.BaseConnection;
import org.postgresql.core.BaseStatement;
import org.postgresql.core.Encoding;
import org.postgresql.core.Field;
import org.postgresql.core.Oid;
import org.postgresql.core.Provider;
import org.postgresql.core.Query;
import org.postgresql.core.ResultCursor;
import org.postgresql.core.ResultHandlerBase;
import org.postgresql.core.TransactionState;
import org.postgresql.core.Tuple;
import org.postgresql.core.TypeInfo;
import org.postgresql.core.Utils;
import org.postgresql.util.ByteConverter;
import org.postgresql.util.GT;
import org.postgresql.util.HStoreConverter;
import org.postgresql.util.JdbcBlackHole;
import org.postgresql.util.NumberParser;
import org.postgresql.util.PGbytea;
import org.postgresql.util.PGobject;
import org.postgresql.util.PGtokenizer;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.checkerframework.checker.index.qual.NonNegative;
import org.checkerframework.checker.index.qual.Positive;
import org.checkerframework.checker.nullness.qual.EnsuresNonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.checkerframework.checker.nullness.qual.PolyNull;
import org.checkerframework.checker.nullness.qual.RequiresNonNull;
import org.checkerframework.dataflow.qual.Pure;
import java.io.ByteArrayInputStream;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLType;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.OffsetTime;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.StringTokenizer;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
public class PgResultSet implements ResultSet, PGRefCursorResultSet {
// needed for updateable result set support
// ---- Updatable result set support (JDBC2 updatable result sets) ----
// NOTE(review): several fields below use raw generic types (HashMap, List, Map);
// the type parameters appear to have been stripped during extraction — confirm
// against upstream before relying on them.
// needed for updateable result set support
private boolean updateable;
private boolean doingUpdates;
// Pending column values accumulated by updateXXX() calls, keyed by column name.
private @Nullable HashMap updateValues;
private boolean usingOID; // are we using the OID for the primary key?
private @Nullable List primaryKeys; // list of primary keys
private boolean singleTable;
private String onlyTable = "";
private @Nullable String tableName;
// Cached DELETE statement reused across deleteRow() calls.
private @Nullable PreparedStatement deleteStatement;
// ---- Result set characteristics fixed at creation time ----
private final int resultsettype;
private final int resultsetconcurrency;
private int fetchdirection = ResultSet.FETCH_UNKNOWN;
private @Nullable TimeZone defaultTimeZone;
protected final BaseConnection connection; // the connection we belong to
protected final BaseStatement statement; // the statement we belong to
protected final Field[] fields; // Field metadata for this resultset.
protected final @Nullable Query originalQuery; // Query we originated from
private @Nullable TimestampUtils timestampUtils; // our own Object because it's not thread safe
protected final int maxRows; // Maximum rows in this resultset (might be 0).
protected final int maxFieldSize; // Maximum field size in this resultset (might be 0).
// ---- Cursor / paging state ----
protected @Nullable List rows; // Current page of results.
protected int currentRow = -1; // Index into 'rows' of our current row (0-based)
protected int rowOffset; // Offset of row 0 in the actual resultset
protected @Nullable Tuple thisRow; // copy of the current result row
protected @Nullable SQLWarning warnings; // The warning chain
/**
 * True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value
 * is always updated by the {@link #getRawValue} method.
 */
protected boolean wasNullFlag;
// are we on the insert row (for JDBC2 updatable resultsets)?
protected boolean onInsertRow;
private @Nullable Tuple rowBuffer; // updateable rowbuffer
protected int fetchSize; // Current fetch size (might be 0).
protected int lastUsedFetchSize; // Fetch size used during last fetch
protected boolean adaptiveFetch;
protected @Nullable ResultCursor cursor; // Cursor for fetching additional data.
// Speed up findColumn by caching lookups
private @Nullable Map columnNameIndexMap;
// Lazily-created metadata, cached after the first getMetaData() call.
private @Nullable ResultSetMetaData rsMetaData;
// Guards row-mutation operations (deleteRow, insertRow, cancelRowUpdates, ...).
private final ResourceLock lock = new ResourceLock();
/**
 * Builds the metadata object describing this result set's columns.
 *
 * @return a fresh {@link ResultSetMetaData} over this result set's fields
 * @throws SQLException if metadata construction fails
 */
protected ResultSetMetaData createMetaData() throws SQLException {
  PgResultSetMetaData metaData = new PgResultSetMetaData(connection, fields);
  return metaData;
}
/**
 * Returns column metadata for this result set, creating and caching it on first use.
 *
 * @throws SQLException if the result set is closed
 */
@Override
public ResultSetMetaData getMetaData() throws SQLException {
  checkClosed();
  ResultSetMetaData metaData = this.rsMetaData;
  if (metaData == null) {
    metaData = createMetaData();
    this.rsMetaData = metaData;
  }
  return metaData;
}
/**
 * Creates a result set over an already-fetched page of tuples.
 *
 * @param originalQuery the query this result set originated from, if any
 * @param statement the statement that produced this result set
 * @param fields per-column metadata; must be non-null
 * @param tuples first page of rows; must be non-null
 * @param cursor backend cursor for fetching further pages, or null if all rows are present
 * @param maxRows maximum number of rows to expose (0 = unlimited)
 * @param maxFieldSize maximum field size (0 = unlimited)
 * @param rsType result set type constant (e.g. {@code TYPE_FORWARD_ONLY})
 * @param rsConcurrency result set concurrency constant (e.g. {@code CONCUR_READ_ONLY})
 * @param rsHoldability result set holdability constant (not stored here)
 * @param adaptiveFetch whether adaptive fetch sizing is enabled
 * @throws SQLException if the connection cannot be obtained from the statement
 * @throws NullPointerException if {@code tuples} or {@code fields} is null
 */
PgResultSet(@Nullable Query originalQuery, BaseStatement statement,
    Field[] fields, List tuples,
    @Nullable ResultCursor cursor, int maxRows, int maxFieldSize, int rsType, int rsConcurrency,
    int rsHoldability, boolean adaptiveFetch) throws SQLException {
  // Fail-fast on invalid null inputs; same exception type and messages as before,
  // expressed with the standard-library idiom.
  Objects.requireNonNull(tuples, "tuples must be non-null");
  Objects.requireNonNull(fields, "fields must be non-null");
  this.originalQuery = originalQuery;
  this.connection = (BaseConnection) statement.getConnection();
  this.statement = statement;
  this.fields = fields;
  this.rows = tuples;
  this.cursor = cursor;
  this.maxRows = maxRows;
  this.maxFieldSize = maxFieldSize;
  this.resultsettype = rsType;
  this.resultsetconcurrency = rsConcurrency;
  this.adaptiveFetch = adaptiveFetch;
  // Constructor doesn't have fetch size and can't be sure if fetch size was used,
  // so the initial value is the number of rows in the first page.
  this.lastUsedFetchSize = tuples.size();
}
/**
 * Not implemented: PostgreSQL has no URL column type.
 *
 * @throws SQLException always (after logging and the closed-state check)
 */
@Override
public URL getURL(@Positive int columnIndex) throws SQLException {
  connection.getLogger().log(Level.FINEST, " getURL columnIndex: {0}", columnIndex);
  checkClosed();
  throw Driver.notImplemented(this.getClass(), "getURL(int)");
}
/**
 * Returns the column value as a URL, looking the column up by name.
 *
 * @throws SQLException always — delegates to {@link #getURL(int)}, which is not implemented
 */
@Override
public URL getURL(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getURL(columnIndex);
}
/**
 * Maps the column's JDBC SQL type to the default Java object used by {@code getObject}.
 *
 * <p>Returns {@code null} for types this method does not handle directly; the caller then takes
 * another processing path (for example building a PGobject, or JDBC3 overrides).</p>
 *
 * @param columnIndex 1-based column index
 * @param field field metadata for the column
 * @return the converted value, or null when the caller should handle the type itself
 * @throws SQLException on conversion failure or invalid state
 */
@RequiresNonNull({"thisRow"})
protected @Nullable Object internalGetObject(@Positive int columnIndex, Field field) throws SQLException {
  castNonNull(thisRow, "thisRow");
  switch (getSQLType(columnIndex)) {
    case Types.BOOLEAN:
    case Types.BIT:
      if (field.getOID() == Oid.BOOL) {
        return getBoolean(columnIndex);
      }
      if (field.getOID() == Oid.BIT) {
        // Let's peek at the data - I tried to use the field.getLength() but it returns 65535 and
        // it doesn't reflect the real length of the field, which is odd.
        // If we have 1 byte, it's a bit(1) and return a boolean to preserve the backwards
        // compatibility. If the value is null, it doesn't really matter
        byte[] data = getRawValue(columnIndex);
        if (data == null || data.length == 1) {
          return getBoolean(columnIndex);
        }
      }
      // Returning null here will lead to another value processing path for the bit field
      // which will return a PGobject
      return null;
    case Types.SQLXML:
      return getSQLXML(columnIndex);
    case Types.TINYINT:
    case Types.SMALLINT:
    case Types.INTEGER:
      return getInt(columnIndex);
    case Types.BIGINT:
      return getLong(columnIndex);
    case Types.NUMERIC:
    case Types.DECIMAL:
      // Scale is packed into the type modifier; -1 means "unspecified".
      return getNumeric(columnIndex,
          field.getMod() == -1 ? -1 : ((field.getMod() - 4) & 0xffff), true);
    case Types.REAL:
      return getFloat(columnIndex);
    case Types.FLOAT:
    case Types.DOUBLE:
      return getDouble(columnIndex);
    case Types.CHAR:
    case Types.VARCHAR:
    case Types.LONGVARCHAR:
      return getString(columnIndex);
    case Types.DATE:
      return getDate(columnIndex);
    case Types.TIME:
      return getTime(columnIndex);
    case Types.TIMESTAMP:
      return getTimestamp(columnIndex, null);
    case Types.BINARY:
    case Types.VARBINARY:
    case Types.LONGVARBINARY:
      return getBytes(columnIndex);
    case Types.ARRAY:
      return getArray(columnIndex);
    case Types.CLOB:
      return getClob(columnIndex);
    case Types.BLOB:
      return getBlob(columnIndex);
    default:
      String type = getPGType(columnIndex);
      // if the backend doesn't know the type then coerce to String
      if ("unknown".equals(type)) {
        return getString(columnIndex);
      }
      if ("uuid".equals(type)) {
        if (isBinary(columnIndex)) {
          return getUUID(castNonNull(thisRow.get(columnIndex - 1)));
        }
        return getUUID(castNonNull(getString(columnIndex)));
      }
      // Specialized support for ref cursors is neater.
      if ("refcursor".equals(type)) {
        // Fetch all results.
        String cursorName = castNonNull(getString(columnIndex));
        StringBuilder sb = new StringBuilder("FETCH ALL IN ");
        Utils.escapeIdentifier(sb, cursorName);
        // nb: no BEGIN triggered here. This is fine. If someone
        // committed, and the cursor was not holdable (closing the
        // cursor), we avoid starting a new xact and promptly causing
        // it to fail. If the cursor *was* holdable, we don't want a
        // new xact anyway since holdable cursor state isn't affected
        // by xact boundaries. If our caller didn't commit at all, or
        // autocommit was on, then we wouldn't issue a BEGIN anyway.
        //
        // We take the scrollability from the statement, but until
        // we have updatable cursors it must be readonly.
        ResultSet rs =
            connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY);
        ((PgResultSet) rs).setRefCursor(cursorName);
        // In long-running transactions these backend cursors take up memory space
        // we could close in rs.close(), but if the transaction is closed before the result set,
        // then
        // the cursor no longer exists
        ((PgResultSet) rs).closeRefCursor();
        return rs;
      }
      if ("hstore".equals(type)) {
        if (isBinary(columnIndex)) {
          return HStoreConverter.fromBytes(castNonNull(thisRow.get(columnIndex - 1)),
              connection.getEncoding());
        }
        return HStoreConverter.fromString(castNonNull(getString(columnIndex)));
      }
      // Caller determines what to do (JDBC3 overrides in this case)
      return null;
  }
}
/**
 * Verifies that this result set is open and scrollable.
 *
 * @throws SQLException if closed, or if the result set is TYPE_FORWARD_ONLY
 */
@Pure
@EnsuresNonNull("rows")
private void checkScrollable() throws SQLException {
  checkClosed();
  if (resultsettype != ResultSet.TYPE_FORWARD_ONLY) {
    return;
  }
  throw new PSQLException(
      GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."),
      PSQLState.INVALID_CURSOR_STATE);
}
/**
 * Moves the cursor to the given 1-based row number; negative values count from the end.
 *
 * @return true if the cursor lands on a valid row, false if positioned before-first/after-last
 * @throws SQLException if the result set is not scrollable or is closed
 */
@Override
public boolean absolute(int index) throws SQLException {
  checkScrollable();
  // A target of 0 means "before the first row".
  if (index == 0) {
    beforeFirst();
    return false;
  }
  final int rowsSize = rows.size();
  final int internalIndex; // 0-based position within the current row list
  if (index > 0) {
    // Positive indices are 1-based from the front; past the end => after-last.
    if (index > rowsSize) {
      afterLast();
      return false;
    }
    internalIndex = index - 1;
  } else {
    // Negative indices count from the end (-1 is the last row);
    // before the beginning => before-first.
    if (index < -rowsSize) {
      beforeFirst();
      return false;
    }
    internalIndex = rowsSize + index;
  }
  currentRow = internalIndex;
  initRowBuffer();
  onInsertRow = false;
  return true;
}
/**
 * Positions the cursor after the last row (no-op for an empty result set's position),
 * clearing the current-row buffers.
 */
@Override
public void afterLast() throws SQLException {
  checkScrollable();
  final int size = rows.size();
  if (size > 0) {
    // "After last" is represented by currentRow == size.
    currentRow = size;
  }
  onInsertRow = false;
  thisRow = null;
  rowBuffer = null;
}
/**
 * Positions the cursor before the first row (no-op for an empty result set's position),
 * clearing the current-row buffers.
 */
@Override
public void beforeFirst() throws SQLException {
  checkScrollable();
  if (!rows.isEmpty()) {
    // "Before first" is represented by currentRow == -1.
    currentRow = -1;
  }
  onInsertRow = false;
  thisRow = null;
  rowBuffer = null;
}
/**
 * Moves the cursor to the first row.
 *
 * @return false when the result set is empty, true otherwise
 */
@Override
public boolean first() throws SQLException {
  checkScrollable();
  if (rows.isEmpty()) {
    return false;
  }
  currentRow = 0;
  initRowBuffer();
  onInsertRow = false;
  return true;
}
/**
 * Returns the column value as an {@link Array}, looking the column up by name.
 */
@Override
public @Nullable Array getArray(String colName) throws SQLException {
  int columnIndex = findColumn(colName);
  return getArray(columnIndex);
}
// Factory hook: builds a PgArray from the binary wire representation.
protected Array makeArray(int oid, byte[] value) throws SQLException {
  return new PgArray(connection, oid, value);
}
// Factory hook: builds a PgArray from the text representation.
protected Array makeArray(int oid, String value) throws SQLException {
  return new PgArray(connection, oid, value);
}
/**
 * Returns the column value as an {@link Array}.
 *
 * @return the array value, or null for SQL NULL
 */
@Pure
@Override
public @Nullable Array getArray(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  int oid = fields[i - 1].getOID();
  // Binary transfer parses the wire bytes; text transfer parses the string form.
  if (isBinary(i)) {
    return makeArray(oid, value);
  }
  String stringValue = castNonNull(getFixedString(i));
  return makeArray(oid, stringValue);
}
/**
 * Returns the column value as a {@link BigDecimal} with unrestricted scale (-1).
 */
@Override
public @Nullable BigDecimal getBigDecimal(@Positive int columnIndex) throws SQLException {
  return getBigDecimal(columnIndex, -1);
}
/**
 * Returns the column value as a {@link BigDecimal}, looking the column up by name.
 */
@Override
public @Nullable BigDecimal getBigDecimal(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getBigDecimal(columnIndex);
}
/**
 * Returns the column value as a {@link Blob}, looking the column up by name.
 */
@Override
public @Nullable Blob getBlob(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getBlob(columnIndex);
}
// Factory hook: wraps a large-object OID in a Blob accessor.
protected Blob makeBlob(long oid) throws SQLException {
  return new PgBlob(connection, oid);
}
/**
 * Returns the column value as a {@link Blob}. The column is expected to hold a
 * large-object OID (read via {@code getLong}).
 *
 * @return the blob, or null for SQL NULL
 */
@Override
@Pure
public @Nullable Blob getBlob(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  long oid = getLong(i);
  return makeBlob(oid);
}
/**
 * Returns a character stream over the column's text value, looking the column up by name.
 */
@Override
public @Nullable Reader getCharacterStream(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getCharacterStream(columnIndex);
}
/**
 * Returns a character stream over the column's text value.
 *
 * <p>PG has no separate long-string datatype, but with TOAST the text datatype can hold very
 * large values; since there is currently no way to stream the value from the server, the whole
 * value is materialized via {@link #getString(int)} and wrapped in a reader.</p>
 *
 * @return a reader over the value, or null for SQL NULL
 */
@Override
public @Nullable Reader getCharacterStream(int i) throws SQLException {
  String value = getString(i);
  if (value == null) {
    return null;
  }
  char[] chars = value.toCharArray();
  return new CharArrayReader(chars);
}
/**
 * Returns the column value as a {@link Clob}, looking the column up by name.
 */
@Override
public @Nullable Clob getClob(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getClob(columnIndex);
}
// Factory hook: wraps a large-object OID in a Clob accessor.
protected Clob makeClob(long oid) throws SQLException {
  return new PgClob(connection, oid);
}
/**
 * Returns the column value as a {@link Clob}. The column is expected to hold a
 * large-object OID (read via {@code getLong}).
 *
 * @return the clob, or null for SQL NULL
 */
@Override
@Pure
public @Nullable Clob getClob(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  long oid = getLong(i);
  return makeClob(oid);
}
/**
 * Returns the concurrency mode this result set was created with
 * (e.g. {@link ResultSet#CONCUR_READ_ONLY}).
 */
@Override
public int getConcurrency() throws SQLException {
  checkClosed();
  return resultsetconcurrency;
}
/**
 * Returns the column as a {@link Date}, interpreting zone-less values in the given calendar's
 * time zone (the connection-default calendar is used when {@code cal} is null).
 *
 * @param i 1-based column index
 * @param cal calendar supplying the time zone, or null for the default
 * @return the date value, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted to a date
 */
@Override
public @Nullable Date getDate(
    int i, @Nullable Calendar cal) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  if (cal == null) {
    cal = getDefaultCalendar();
  }
  if (isBinary(i)) {
    int col = i - 1;
    int oid = fields[col].getOID();
    TimeZone tz = cal.getTimeZone();
    if (oid == Oid.DATE) {
      return getTimestampUtils().toDateBin(tz, value);
    } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
      // If backend provides just TIMESTAMP, we use "cal" timezone
      // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
      Timestamp timestamp = castNonNull(getTimestamp(i, cal));
      // Here we just truncate date to 00:00 in a given time zone
      return getTimestampUtils().convertToDate(timestamp.getTime(), tz);
    } else {
      throw new PSQLException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "date"),
          PSQLState.DATA_TYPE_MISMATCH);
    }
  }
  // Text transfer: parse the server's string form.
  return getTimestampUtils().toDate(cal, castNonNull(getString(i)));
}
/**
 * Returns the column as a {@link Time}, interpreting zone-less values in the given calendar's
 * time zone (the connection-default calendar is used when {@code cal} is null).
 *
 * @param i 1-based column index
 * @param cal calendar supplying the time zone, or null for the default
 * @return the time value, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted to a time
 */
@Override
public @Nullable Time getTime(
    int i, @Nullable Calendar cal) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  if (cal == null) {
    cal = getDefaultCalendar();
  }
  if (isBinary(i)) {
    int col = i - 1;
    int oid = fields[col].getOID();
    TimeZone tz = cal.getTimeZone();
    if (oid == Oid.TIME || oid == Oid.TIMETZ) {
      return getTimestampUtils().toTimeBin(tz, value);
    } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
      // If backend provides just TIMESTAMP, we use "cal" timezone
      // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
      Timestamp timestamp = getTimestamp(i, cal);
      if (timestamp == null) {
        return null;
      }
      long timeMillis = timestamp.getTime();
      if (oid == Oid.TIMESTAMPTZ) {
        // time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC
        // So we truncate days
        return new Time(timeMillis % TimeUnit.DAYS.toMillis(1));
      }
      // Here we just truncate date part
      return getTimestampUtils().convertToTime(timeMillis, tz);
    } else {
      throw new PSQLException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "time"),
          PSQLState.DATA_TYPE_MISMATCH);
    }
  }
  // Text transfer: parse the server's string form.
  String string = getString(i);
  return getTimestampUtils().toTime(cal, string);
}
/**
 * Returns the column as a {@link Timestamp}, interpreting zone-less values in the given
 * calendar's time zone (the connection-default calendar is used when {@code cal} is null).
 *
 * <p>Per the JDBC spec, TIME, TIMETZ and DATE columns are also convertible to timestamps;
 * time-only values are anchored on the date 1970-01-01.</p>
 *
 * @param i 1-based column index
 * @param cal calendar supplying the time zone, or null for the default
 * @return the timestamp value, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted to a timestamp
 */
@Pure
@Override
public @Nullable Timestamp getTimestamp(
    int i, @Nullable Calendar cal) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  if (cal == null) {
    cal = getDefaultCalendar();
  }
  int col = i - 1;
  int oid = fields[col].getOID();
  if (isBinary(i)) {
    byte[] row = castNonNull(thisRow).get(col);
    if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
      boolean hasTimeZone = oid == Oid.TIMESTAMPTZ;
      TimeZone tz = cal.getTimeZone();
      return getTimestampUtils().toTimestampBin(tz, castNonNull(row), hasTimeZone);
    } else if (oid == Oid.TIME) {
      // JDBC spec says getTimestamp of Time and Date must be supported
      Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(cal.getTimeZone(), castNonNull(row), false);
      // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
      Timestamp tsUnixEpochDate = new Timestamp(castNonNull(getTime(i, cal)).getTime());
      tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
      return tsUnixEpochDate;
    } else if (oid == Oid.TIMETZ) {
      TimeZone tz = cal.getTimeZone();
      byte[] timeBytesWithoutTimeZone = Arrays.copyOfRange(castNonNull(row), 0, 8);
      Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(tz, timeBytesWithoutTimeZone, false);
      // If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01
      Timestamp tsUnixEpochDate = new Timestamp(castNonNull(getTime(i, cal)).getTime());
      tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
      return tsUnixEpochDate;
    } else if (oid == Oid.DATE) {
      // BUG FIX: this result was previously computed and discarded (missing "return"),
      // causing binary DATE values to fall through to the text-parsing path below.
      return new Timestamp(castNonNull(getDate(i, cal)).getTime());
    } else {
      throw new PSQLException(
          GT.tr("Cannot convert the column of type {0} to requested type {1}.",
              Oid.toString(oid), "timestamp"),
          PSQLState.DATA_TYPE_MISMATCH);
    }
  }
  // If this is actually a timestamptz, the server-provided timezone will override
  // the one we pass in, which is the desired behaviour. Otherwise, we'll
  // interpret the timezone-less value in the provided timezone.
  String string = castNonNull(getString(i));
  if (oid == Oid.TIME || oid == Oid.TIMETZ) {
    // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
    Timestamp tsWithMicros = getTimestampUtils().toTimestamp(cal, string);
    Timestamp tsUnixEpochDate = new Timestamp(getTimestampUtils().toTime(cal, string).getTime());
    tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
    return tsUnixEpochDate;
  }
  return getTimestampUtils().toTimestamp(cal, string);
}
// 1970-01-01, used to anchor time-only values onto the Unix epoch date.
// TODO: In Java 8 this constant is missing, later versions (at least 11) have LocalDate#EPOCH:
private static final LocalDate LOCAL_DATE_EPOCH = LocalDate.of(1970, 1, 1);
/**
 * Returns the column as an {@link OffsetDateTime}; supports TIMESTAMPTZ/TIMESTAMP and
 * (per JDBC) TIMETZ anchored on 1970-01-01.
 *
 * @param i 1-based column index
 * @return the value, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted
 */
private @Nullable OffsetDateTime getOffsetDateTime(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  int col = i - 1;
  int oid = fields[col].getOID();
  // TODO: Disallow getting OffsetDateTime from a non-TZ field
  if (isBinary(i)) {
    if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
      return getTimestampUtils().toOffsetDateTimeBin(value);
    } else if (oid == Oid.TIMETZ) {
      // JDBC spec says timetz must be supported
      return getTimestampUtils().toOffsetTimeBin(value).atDate(LOCAL_DATE_EPOCH);
    }
  } else {
    // string
    if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
      OffsetDateTime offsetDateTime = getTimestampUtils().toOffsetDateTime(castNonNull(getString(i)));
      // MAX/MIN are the "infinity" sentinels; converting their offset would be meaningless.
      if (offsetDateTime != OffsetDateTime.MAX && offsetDateTime != OffsetDateTime.MIN) {
        return offsetDateTime.withOffsetSameInstant(ZoneOffset.UTC);
      } else {
        return offsetDateTime;
      }
    }
    if (oid == Oid.TIMETZ) {
      return getTimestampUtils().toOffsetDateTime(castNonNull(getString(i)));
    }
  }
  throw new PSQLException(
      GT.tr("Cannot convert the column of type {0} to requested type {1}.",
          Oid.toString(oid), "java.time.OffsetDateTime"),
      PSQLState.DATA_TYPE_MISMATCH);
}
/**
 * Returns the column as an {@link OffsetTime}; only TIMETZ columns are supported.
 *
 * @param i 1-based column index
 * @return the value, or null for SQL NULL
 * @throws SQLException if the column is not TIMETZ
 */
private @Nullable OffsetTime getOffsetTime(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  int oid = fields[i - 1].getOID();
  if (oid != Oid.TIMETZ) {
    throw new PSQLException(
        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
            Oid.toString(oid), "java.time.OffsetTime"),
        PSQLState.DATA_TYPE_MISMATCH);
  }
  return isBinary(i)
      ? getTimestampUtils().toOffsetTimeBin(value)
      : getTimestampUtils().toOffsetTime(castNonNull(getString(i)));
}
/**
 * Returns the column as a {@link LocalDateTime}; only TIMESTAMP columns are supported.
 *
 * @param i 1-based column index
 * @return the value, or null for SQL NULL
 * @throws SQLException if the column is not TIMESTAMP
 */
private @Nullable LocalDateTime getLocalDateTime(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  int oid = fields[i - 1].getOID();
  if (oid != Oid.TIMESTAMP) {
    throw new PSQLException(
        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
            Oid.toString(oid), "java.time.LocalDateTime"),
        PSQLState.DATA_TYPE_MISMATCH);
  }
  return isBinary(i)
      ? getTimestampUtils().toLocalDateTimeBin(value)
      : getTimestampUtils().toLocalDateTime(castNonNull(getString(i)));
}
/**
 * Returns the column as a {@link LocalDate}; DATE and TIMESTAMP columns are supported
 * (TIMESTAMP values have their time component dropped).
 *
 * @param i 1-based column index
 * @return the value, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted
 */
private @Nullable LocalDate getLocalDate(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  int oid = fields[i - 1].getOID();
  if (isBinary(i)) {
    if (oid == Oid.DATE) {
      return getTimestampUtils().toLocalDateBin(value);
    }
    if (oid == Oid.TIMESTAMP) {
      return getTimestampUtils().toLocalDateTimeBin(value).toLocalDate();
    }
  } else if (oid == Oid.DATE || oid == Oid.TIMESTAMP) {
    // Text form: parse as LocalDateTime, then drop the time component.
    return getTimestampUtils().toLocalDateTime(castNonNull(getString(i))).toLocalDate();
  }
  throw new PSQLException(
      GT.tr("Cannot convert the column of type {0} to requested type {1}.",
          Oid.toString(oid), "java.time.LocalDate"),
      PSQLState.DATA_TYPE_MISMATCH);
}
/**
 * Returns the column as a {@link LocalTime}; only TIME columns are supported.
 *
 * @param i 1-based column index
 * @return the value, or null for SQL NULL
 * @throws SQLException if the column is not TIME
 */
private @Nullable LocalTime getLocalTime(int i) throws SQLException {
  byte[] value = getRawValue(i);
  if (value == null) {
    return null;
  }
  int oid = fields[i - 1].getOID();
  if (oid != Oid.TIME) {
    throw new PSQLException(
        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
            Oid.toString(oid), "java.time.LocalTime"),
        PSQLState.DATA_TYPE_MISMATCH);
  }
  return isBinary(i)
      ? getTimestampUtils().toLocalTimeBin(value)
      : getTimestampUtils().toLocalTime(getString(i));
}
/**
 * Returns the column as a {@link Date}, looking the column up by name.
 */
@Override
public @Nullable Date getDate(
    String c, @Nullable Calendar cal) throws SQLException {
  int columnIndex = findColumn(c);
  return getDate(columnIndex, cal);
}
/**
 * Returns the column as a {@link Time}, looking the column up by name.
 */
@Override
public @Nullable Time getTime(
    String c, @Nullable Calendar cal) throws SQLException {
  int columnIndex = findColumn(c);
  return getTime(columnIndex, cal);
}
/**
 * Returns the column as a {@link Timestamp}, looking the column up by name.
 */
@Override
public @Nullable Timestamp getTimestamp(
    String c, @Nullable Calendar cal) throws SQLException {
  int columnIndex = findColumn(c);
  return getTimestamp(columnIndex, cal);
}
/**
 * Returns the current fetch-direction hint (see {@link #setFetchDirection(int)}).
 */
@Override
public int getFetchDirection() throws SQLException {
  checkClosed();
  return fetchdirection;
}
public @Nullable Object getObjectImpl(
String columnName, @Nullable Map> map) throws SQLException {
return getObjectImpl(findColumn(columnName), map);
}
/*
* This checks against map for the type of column i, and if found returns an object based on that
* mapping. The class must implement the SQLData interface.
*/
public @Nullable Object getObjectImpl(
int i, @Nullable Map> map) throws SQLException {
checkClosed();
if (map == null || map.isEmpty()) {
return getObject(i);
}
throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
}
/**
 * Returns the column value as a {@link Ref}, looking the column up by name.
 *
 * @throws SQLException always — delegates to {@link #getRef(int)}, which is not implemented
 */
@Override
public @Nullable Ref getRef(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getRef(columnIndex);
}
/**
 * Not implemented: the backend doesn't yet have SQL3 REF types.
 *
 * @throws SQLException always (after the closed-state check)
 */
@Override
public @Nullable Ref getRef(int i) throws SQLException {
  checkClosed();
  // The backend doesn't yet have SQL3 REF types
  throw Driver.notImplemented(this.getClass(), "getRef(int)");
}
/**
 * Returns the current 1-based row number within the whole result set, or 0 when there is
 * no current row (insert row, before-first, or after-last).
 */
@Override
public int getRow() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return 0;
  }
  final int rowsSize = rows.size();
  boolean onValidRow = currentRow >= 0 && currentRow < rowsSize;
  if (!onValidRow) {
    return 0;
  }
  // rowOffset accounts for previously-discarded pages of a cursor-backed result set.
  return rowOffset + currentRow + 1;
}
// This one needs some thought, as not all ResultSets come from a statement
/**
 * Returns the statement that produced this result set.
 */
@Override
public Statement getStatement() throws SQLException {
  checkClosed();
  return statement;
}
/**
 * Returns the result set type this result set was created with
 * (e.g. {@link ResultSet#TYPE_FORWARD_ONLY}).
 */
@Override
public int getType() throws SQLException {
  checkClosed();
  return resultsettype;
}
/**
 * Returns true when the cursor is positioned after the last row. An empty result set
 * is never "after last" per the JDBC contract.
 */
@Pure
@Override
public boolean isAfterLast() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  final List currentRows = castNonNull(rows, "rows");
  final int size = currentRows.size();
  if (rowOffset + size == 0) {
    return false; // empty result set — never after-last
  }
  return currentRow >= size;
}
/**
 * Returns true when the cursor is positioned before the first row of a non-empty
 * result set.
 */
@Pure
@Override
public boolean isBeforeFirst() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  // Absolute position must be negative; the null-check on rows is only reached
  // in that case (preserving the original short-circuit order).
  if (rowOffset + currentRow >= 0) {
    return false;
  }
  return !castNonNull(rows, "rows").isEmpty();
}
/**
 * Returns true when the cursor is on the first row of a non-empty result set.
 */
@Override
public boolean isFirst() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  final int size = rows.size();
  if (rowOffset + size == 0) {
    return false; // empty result set
  }
  // First row means absolute position 0.
  return rowOffset + currentRow == 0;
}
/**
 * Returns true when the cursor is on the last row.
 *
 * <p>For cursor-backed result sets this may need to fetch the next block from the backend to
 * find out whether more rows exist; in that case the current row is kept as row 0 of the newly
 * fetched block, so the cursor position is preserved.</p>
 *
 * @throws SQLException if closed, or if the probe fetch fails
 */
@Override
public boolean isLast() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  List rows = castNonNull(this.rows, "rows");
  final int rowsSize = rows.size();
  if (rowsSize == 0) {
    return false; // No rows.
  }
  if (currentRow != (rowsSize - 1)) {
    return false; // Not on the last row of this block.
  }
  // We are on the last row of the current block.
  ResultCursor cursor = this.cursor;
  if (cursor == null) {
    // This is the last block and therefore the last row.
    return true;
  }
  if (maxRows > 0 && rowOffset + currentRow == maxRows) {
    // We are implicitly limited by maxRows.
    return true;
  }
  // Now the more painful case begins.
  // We are on the last row of the current block, but we don't know if the
  // current block is the last block; we must try to fetch some more data to
  // find out.
  // We do a fetch of the next block, then prepend the current row to that
  // block (so currentRow == 0). This works as the current row
  // must be the last row of the current block if we got this far.
  rowOffset += rowsSize - 1; // Discarding all but one row.
  // Work out how many rows maxRows will let us fetch.
  int fetchRows = fetchSize;
  int adaptiveFetchRows = connection.getQueryExecutor()
      .getAdaptiveFetchSize(adaptiveFetch, cursor);
  if (adaptiveFetchRows != -1) {
    fetchRows = adaptiveFetchRows;
  }
  if (maxRows != 0) {
    if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
      // Fetch would exceed maxRows, limit it.
      fetchRows = maxRows - rowOffset;
    }
  }
  // Do the actual fetch.
  connection.getQueryExecutor()
      .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch);
  // After fetch, update last used fetch size (could be useful during adaptive fetch).
  lastUsedFetchSize = fetchRows;
  rows = castNonNull(this.rows, "rows");
  // Now prepend our one saved row and move to it.
  rows.add(0, castNonNull(thisRow));
  currentRow = 0;
  // Finally, now we can tell if we're the last row or not.
  return rows.size() == 1;
}
/**
 * Moves the cursor to the last row of the current row list.
 *
 * @return false when the result set is empty, true otherwise
 */
@Override
public boolean last() throws SQLException {
  checkScrollable();
  final List currentRows = castNonNull(this.rows, "rows");
  final int size = currentRows.size();
  if (size == 0) {
    return false;
  }
  currentRow = size - 1;
  initRowBuffer();
  onInsertRow = false;
  return true;
}
/**
 * Moves the cursor one row backwards.
 *
 * @return false when this positions the cursor before the first row
 * @throws SQLException if not scrollable, or while on the insert row
 */
@Override
public boolean previous() throws SQLException {
  checkScrollable();
  if (onInsertRow) {
    throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
        PSQLState.INVALID_CURSOR_STATE);
  }
  if (currentRow <= 0) {
    // Stepping back from the first row lands before-first.
    currentRow = -1;
    thisRow = null;
    rowBuffer = null;
    return false;
  }
  currentRow--;
  initRowBuffer();
  return true;
}
/**
 * Moves the cursor by the given number of rows relative to the current position.
 *
 * @return true if the cursor lands on a valid row
 * @throws SQLException if not scrollable, or while on the insert row
 */
@Override
public boolean relative(int rows) throws SQLException {
  checkScrollable();
  if (onInsertRow) {
    throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
        PSQLState.INVALID_CURSOR_STATE);
  }
  // absolute() takes a 1-based index; currentRow is 0-based.
  int target = currentRow + 1 + rows;
  if (target >= 0) {
    return absolute(target);
  }
  beforeFirst();
  return false;
}
/**
 * Records a fetch-direction hint. Reverse/unknown directions require a scrollable
 * result set; anything other than the three standard constants is rejected.
 *
 * @throws SQLException for an invalid constant, or reverse/unknown on a forward-only set
 */
@Override
public void setFetchDirection(int direction) throws SQLException {
  checkClosed();
  if (direction == ResultSet.FETCH_REVERSE || direction == ResultSet.FETCH_UNKNOWN) {
    // Non-forward traversal only makes sense on a scrollable result set.
    checkScrollable();
  } else if (direction != ResultSet.FETCH_FORWARD) {
    throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
        PSQLState.INVALID_PARAMETER_VALUE);
  }
  this.fetchdirection = direction;
}
/**
 * Discards any pending updateXXX() changes to the current row and restores the
 * original row buffer. Illegal while positioned on the insert row.
 *
 * @throws SQLException if closed, or while on the insert row
 */
@Override
public void cancelRowUpdates() throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    checkClosed();
    if (onInsertRow) {
      throw new PSQLException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."),
          PSQLState.INVALID_CURSOR_STATE);
    }
    if (!doingUpdates) {
      return; // nothing pending
    }
    doingUpdates = false;
    clearRowBuffer(true);
  }
}
/**
 * Deletes the current row from the database and from this result set.
 *
 * <p>Builds (and caches) a {@code DELETE ... WHERE pk1 = ? AND ...} statement keyed on the
 * table's primary keys, binds the current row's key values, executes it, and then removes the
 * row from the in-memory row list.</p>
 *
 * @throws SQLException if the result set is not updateable, is on the insert row, or is
 *         positioned before the first / after the last row, or if the delete fails
 */
@Override
public void deleteRow() throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    checkUpdateable();
    if (onInsertRow) {
      throw new PSQLException(GT.tr("Cannot call deleteRow() when on the insert row."),
          PSQLState.INVALID_CURSOR_STATE);
    }
    if (isBeforeFirst()) {
      throw new PSQLException(
          GT.tr(
              "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here."),
          PSQLState.INVALID_CURSOR_STATE);
    }
    if (isAfterLast()) {
      throw new PSQLException(
          GT.tr(
              "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here."),
          PSQLState.INVALID_CURSOR_STATE);
    }
    List rows = castNonNull(this.rows, "rows");
    if (rows.isEmpty()) {
      throw new PSQLException(GT.tr("There are no rows in this ResultSet."),
          PSQLState.INVALID_CURSOR_STATE);
    }
    List primaryKeys = castNonNull(this.primaryKeys, "primaryKeys");
    int numKeys = primaryKeys.size();
    PreparedStatement deleteStatement = this.deleteStatement;
    if (deleteStatement == null) {
      // Build the DELETE once and cache it; "onlyTable" is a prefix modifier for the table name.
      StringBuilder deleteSQL =
          new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where ");
      for (int i = 0; i < numKeys; i++) {
        Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name);
        deleteSQL.append(" = ?");
        if (i < numKeys - 1) {
          deleteSQL.append(" and ");
        }
      }
      this.deleteStatement = deleteStatement = connection.prepareStatement(deleteSQL.toString());
    }
    deleteStatement.clearParameters();
    // Bind the current row's primary-key values.
    for (int i = 0; i < numKeys; i++) {
      deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue());
    }
    deleteStatement.executeUpdate();
    // Mirror the deletion in the local page and step back so next() lands correctly.
    rows.remove(currentRow);
    currentRow--;
    moveToCurrentRow();
  }
}
@Override
public void insertRow() throws SQLException {
try (ResourceLock ignore = lock.obtain()) {
checkUpdateable();
castNonNull(rows, "rows");
if (!onInsertRow) {
throw new PSQLException(GT.tr("Not on the insert row."), PSQLState.INVALID_CURSOR_STATE);
}
HashMap updateValues = this.updateValues;
if (updateValues == null || updateValues.isEmpty()) {
throw new PSQLException(GT.tr("You must specify at least one column value to insert a row."),
PSQLState.INVALID_PARAMETER_VALUE);
}
// loop through the keys in the insertTable and create the sql statement
// we have to create the sql every time since the user could insert different
// columns each time
StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" (");
StringBuilder paramSQL = new StringBuilder(") values (");
Iterator columnNames = updateValues.keySet().iterator();
int numColumns = updateValues.size();
for (int i = 0; columnNames.hasNext(); i++) {
String columnName = columnNames.next();
Utils.escapeIdentifier(insertSQL, columnName);
if (i < numColumns - 1) {
insertSQL.append(", ");
paramSQL.append("?,");
} else {
paramSQL.append("?)");
}
}
insertSQL.append(paramSQL.toString());
PreparedStatement insertStatement = null;
Tuple rowBuffer = castNonNull(this.rowBuffer);
try {
insertStatement = connection.prepareStatement(insertSQL.toString(), Statement.RETURN_GENERATED_KEYS);
Iterator