/*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package org.postgresql.jdbc;
import static org.postgresql.util.internal.Nullness.castNonNull;
import org.postgresql.PGResultSetMetaData;
import org.postgresql.PGStatement;
import org.postgresql.core.BaseConnection;
import org.postgresql.core.BaseStatement;
import org.postgresql.core.Encoding;
import org.postgresql.core.Field;
import org.postgresql.core.Oid;
import org.postgresql.core.Query;
import org.postgresql.core.ResultCursor;
import org.postgresql.core.ResultHandlerBase;
import org.postgresql.core.Tuple;
import org.postgresql.core.TypeInfo;
import org.postgresql.core.Utils;
import org.postgresql.util.ByteConverter;
import org.postgresql.util.GT;
import org.postgresql.util.HStoreConverter;
import org.postgresql.util.JdbcBlackHole;
import org.postgresql.util.PGbytea;
import org.postgresql.util.PGobject;
import org.postgresql.util.PGtokenizer;
import org.postgresql.util.PSQLException;
import org.postgresql.util.PSQLState;
import org.checkerframework.checker.index.qual.NonNegative;
import org.checkerframework.checker.index.qual.Positive;
import org.checkerframework.checker.nullness.qual.EnsuresNonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.checkerframework.checker.nullness.qual.PolyNull;
import org.checkerframework.checker.nullness.qual.RequiresNonNull;
import org.checkerframework.dataflow.qual.Pure;
import java.io.ByteArrayInputStream;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
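/**
 * Driver-internal implementation of {@link java.sql.ResultSet} backed by a page of
 * {@link Tuple}s fetched from the backend, with optional cursor-based fetching of further
 * pages and limited support for updatable result sets.
 *
 * <p>Illustrative usage sketch only (standard JDBC API; the connection {@code conn} and the
 * {@code users} table are assumptions, not part of this class):</p>
 *
 * <pre>{@code
 * try (Statement stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
 *          ResultSet.CONCUR_READ_ONLY);
 *      ResultSet rs = stmt.executeQuery("SELECT id, name FROM users")) {
 *   while (rs.next()) {
 *     long id = rs.getLong(1);
 *     String name = rs.getString("name");
 *   }
 * }
 * }</pre>
 */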
public class PgResultSet implements ResultSet, org.postgresql.PGRefCursorResultSet {
// needed for updateable result set support
private boolean updateable = false;
private boolean doingUpdates = false;
private @Nullable HashMap<String, Object> updateValues = null;
private boolean usingOID = false; // are we using the OID for the primary key?
private @Nullable List<PrimaryKey> primaryKeys; // list of primary keys
private boolean singleTable = false;
private String onlyTable = "";
private @Nullable String tableName = null;
private @Nullable PreparedStatement deleteStatement = null;
private final int resultsettype;
private final int resultsetconcurrency;
private int fetchdirection = ResultSet.FETCH_UNKNOWN;
private @Nullable TimeZone defaultTimeZone;
protected final BaseConnection connection; // the connection we belong to
protected final BaseStatement statement; // the statement we belong to
protected final Field[] fields; // Field metadata for this resultset.
protected final @Nullable Query originalQuery; // Query we originated from
protected final int maxRows; // Maximum rows in this resultset (might be 0).
protected final int maxFieldSize; // Maximum field size in this resultset (might be 0).
protected @Nullable List<Tuple> rows; // Current page of results.
protected int currentRow = -1; // Index into 'rows' of our current row (0-based)
protected int rowOffset; // Offset of row 0 in the actual resultset
protected @Nullable Tuple thisRow; // copy of the current result row
protected @Nullable SQLWarning warnings = null; // The warning chain
/**
* True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value
* is always updated by the {@link #getRawValue} method.
*/
protected boolean wasNullFlag = false;
protected boolean onInsertRow = false;
// are we on the insert row (for JDBC2 updatable resultsets)?
private @Nullable Tuple rowBuffer = null; // updateable rowbuffer
protected int fetchSize; // Current fetch size (might be 0).
protected @Nullable ResultCursor cursor; // Cursor for fetching additional data.
// Speed up findColumn by caching lookups
private @Nullable Map<String, Integer> columnNameIndexMap;
private @Nullable ResultSetMetaData rsMetaData;
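/**
 * Creates the {@link ResultSetMetaData} for this result set. {@link #getMetaData()} calls this
 * lazily and caches the result in {@link #rsMetaData}.
 */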
protected ResultSetMetaData createMetaData() throws SQLException {
return new PgResultSetMetaData(connection, fields);
}
public ResultSetMetaData getMetaData() throws SQLException {
checkClosed();
if (rsMetaData == null) {
rsMetaData = createMetaData();
}
return rsMetaData;
}
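/**
 * Constructs a result set over an initial page of tuples. {@code tuples} and {@code fields}
 * must be non-null; {@code cursor} may be null when the entire result fits in a single page.
 * A {@code maxRows} or {@code maxFieldSize} of 0 is treated as "no limit".
 */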
PgResultSet(@Nullable Query originalQuery, BaseStatement statement,
Field[] fields, List<Tuple> tuples,
@Nullable ResultCursor cursor, int maxRows, int maxFieldSize, int rsType, int rsConcurrency,
int rsHoldability) throws SQLException {
// Fail-fast on invalid null inputs
if (tuples == null) {
throw new NullPointerException("tuples must be non-null");
}
if (fields == null) {
throw new NullPointerException("fields must be non-null");
}
this.originalQuery = originalQuery;
this.connection = (BaseConnection) statement.getConnection();
this.statement = statement;
this.fields = fields;
this.rows = tuples;
this.cursor = cursor;
this.maxRows = maxRows;
this.maxFieldSize = maxFieldSize;
this.resultsettype = rsType;
this.resultsetconcurrency = rsConcurrency;
}
public java.net.URL getURL(@Positive int columnIndex) throws SQLException {
connection.getLogger().log(Level.FINEST, " getURL columnIndex: {0}", columnIndex);
checkClosed();
throw org.postgresql.Driver.notImplemented(this.getClass(), "getURL(int)");
}
public java.net.URL getURL(String columnName) throws SQLException {
return getURL(findColumn(columnName));
}
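/**
 * Maps the column's reported SQL type to the default Java object returned for it.
 * PostgreSQL-specific types without a JDBC constant (uuid, refcursor, hstore) are handled
 * explicitly; for anything else this returns null so the caller (the JDBC3 overrides, per the
 * comment below) can decide what to do.
 */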
@RequiresNonNull({"thisRow"})
protected @Nullable Object internalGetObject(@Positive int columnIndex, Field field) throws SQLException {
castNonNull(thisRow, "thisRow");
switch (getSQLType(columnIndex)) {
case Types.BOOLEAN:
case Types.BIT:
return getBoolean(columnIndex);
case Types.SQLXML:
return getSQLXML(columnIndex);
case Types.TINYINT:
case Types.SMALLINT:
case Types.INTEGER:
return getInt(columnIndex);
case Types.BIGINT:
return getLong(columnIndex);
case Types.NUMERIC:
case Types.DECIMAL:
return getNumeric(columnIndex,
(field.getMod() == -1) ? -1 : ((field.getMod() - 4) & 0xffff), true);
case Types.REAL:
return getFloat(columnIndex);
case Types.FLOAT:
case Types.DOUBLE:
return getDouble(columnIndex);
case Types.CHAR:
case Types.VARCHAR:
case Types.LONGVARCHAR:
return getString(columnIndex);
case Types.DATE:
return getDate(columnIndex);
case Types.TIME:
return getTime(columnIndex);
case Types.TIMESTAMP:
return getTimestamp(columnIndex, null);
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
return getBytes(columnIndex);
case Types.ARRAY:
return getArray(columnIndex);
case Types.CLOB:
return getClob(columnIndex);
case Types.BLOB:
return getBlob(columnIndex);
default:
String type = getPGType(columnIndex);
// if the backend doesn't know the type then coerce to String
if (type.equals("unknown")) {
return getString(columnIndex);
}
if (type.equals("uuid")) {
if (isBinary(columnIndex)) {
return getUUID(castNonNull(thisRow.get(columnIndex - 1)));
}
return getUUID(castNonNull(getString(columnIndex)));
}
// Specialized support for ref cursors is neater.
if (type.equals("refcursor")) {
// Fetch all results.
String cursorName = castNonNull(getString(columnIndex));
StringBuilder sb = new StringBuilder("FETCH ALL IN ");
Utils.escapeIdentifier(sb, cursorName);
// nb: no BEGIN triggered here. This is fine. If someone
// committed, and the cursor was not holdable (closing the
// cursor), we avoid starting a new xact and promptly causing
// it to fail. If the cursor *was* holdable, we don't want a
// new xact anyway since holdable cursor state isn't affected
// by xact boundaries. If our caller didn't commit at all, or
// autocommit was on, then we wouldn't issue a BEGIN anyway.
//
// We take the scrollability from the statement, but until
// we have updatable cursors it must be readonly.
ResultSet rs =
connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY);
//
// In long-running transactions these backend cursors take up memory space;
// we could close them in rs.close(), but if the transaction is closed before
// the result set, the cursor no longer exists.
sb.setLength(0);
sb.append("CLOSE ");
Utils.escapeIdentifier(sb, cursorName);
connection.execSQLUpdate(sb.toString());
((PgResultSet) rs).setRefCursor(cursorName);
return rs;
}
if ("hstore".equals(type)) {
if (isBinary(columnIndex)) {
return HStoreConverter.fromBytes(castNonNull(thisRow.get(columnIndex - 1)),
connection.getEncoding());
}
return HStoreConverter.fromString(castNonNull(getString(columnIndex)));
}
// Caller determines what to do (JDBC3 overrides in this case)
return null;
}
}
@Pure
@EnsuresNonNull("rows")
private void checkScrollable() throws SQLException {
checkClosed();
if (resultsettype == ResultSet.TYPE_FORWARD_ONLY) {
throw new PSQLException(
GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."),
PSQLState.INVALID_CURSOR_STATE);
}
}
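/**
 * Moves to the given row using JDBC's 1-based addressing: a positive index counts from the
 * start, a negative index counts from the end, and 0 positions the cursor before the first row.
 *
 * <pre>{@code
 * // Illustrative only; assumes a scrollable ResultSet "rs".
 * rs.absolute(1);   // first row
 * rs.absolute(-1);  // last row
 * rs.absolute(0);   // before the first row; returns false
 * }</pre>
 */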
@Override
public boolean absolute(int index) throws SQLException {
checkScrollable();
// index is 1-based, but internally we use 0-based indices
int internalIndex;
if (index == 0) {
beforeFirst();
return false;
}
final int rows_size = rows.size();
// if index<0, count from the end of the result set, but check
// to be sure that it is not beyond the first index
if (index < 0) {
if (index >= -rows_size) {
internalIndex = rows_size + index;
} else {
beforeFirst();
return false;
}
} else {
// must be the case that index>0,
// find the correct place, assuming that
// the index is not too large
if (index <= rows_size) {
internalIndex = index - 1;
} else {
afterLast();
return false;
}
}
currentRow = internalIndex;
initRowBuffer();
onInsertRow = false;
return true;
}
@Override
public void afterLast() throws SQLException {
checkScrollable();
final int rows_size = rows.size();
if (rows_size > 0) {
currentRow = rows_size;
}
onInsertRow = false;
thisRow = null;
rowBuffer = null;
}
@Override
public void beforeFirst() throws SQLException {
checkScrollable();
if (!rows.isEmpty()) {
currentRow = -1;
}
onInsertRow = false;
thisRow = null;
rowBuffer = null;
}
@Override
public boolean first() throws SQLException {
checkScrollable();
if (rows.size() <= 0) {
return false;
}
currentRow = 0;
initRowBuffer();
onInsertRow = false;
return true;
}
@Override
public @Nullable Array getArray(String colName) throws SQLException {
return getArray(findColumn(colName));
}
protected Array makeArray(int oid, byte[] value) throws SQLException {
return new PgArray(connection, oid, value);
}
protected Array makeArray(int oid, String value) throws SQLException {
return new PgArray(connection, oid, value);
}
@Pure
@Override
public @Nullable Array getArray(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
int oid = fields[i - 1].getOID();
if (isBinary(i)) {
return makeArray(oid, value);
}
return makeArray(oid, castNonNull(getFixedString(i)));
}
public java.math.@Nullable BigDecimal getBigDecimal(@Positive int columnIndex) throws SQLException {
return getBigDecimal(columnIndex, -1);
}
public java.math.@Nullable BigDecimal getBigDecimal(String columnName) throws SQLException {
return getBigDecimal(findColumn(columnName));
}
public @Nullable Blob getBlob(String columnName) throws SQLException {
return getBlob(findColumn(columnName));
}
protected Blob makeBlob(long oid) throws SQLException {
return new PgBlob(connection, oid);
}
@Pure
public @Nullable Blob getBlob(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
return makeBlob(getLong(i));
}
public java.io.@Nullable Reader getCharacterStream(String columnName) throws SQLException {
return getCharacterStream(findColumn(columnName));
}
public java.io.@Nullable Reader getCharacterStream(int i) throws SQLException {
String value = getString(i);
if (value == null) {
return null;
}
// Version 7.2 supports AsciiStream for all the PG text types.
// As the spec/javadoc for this method indicate, this is to be used for
// large text values (i.e. LONGVARCHAR). PG doesn't have a separate
// long string datatype, but with TOAST the text datatype is capable of
// handling very large values. Thus the implementation ends up calling
// getString() since there is no current way to stream the value from the server.
return new CharArrayReader(value.toCharArray());
}
public @Nullable Clob getClob(String columnName) throws SQLException {
return getClob(findColumn(columnName));
}
protected Clob makeClob(long oid) throws SQLException {
return new PgClob(connection, oid);
}
@Pure
public @Nullable Clob getClob(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
return makeClob(getLong(i));
}
public int getConcurrency() throws SQLException {
checkClosed();
return resultsetconcurrency;
}
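/**
 * For binary DATE values the calendar's time zone is applied directly; binary TIMESTAMP and
 * TIMESTAMPTZ values are first read as a {@link Timestamp} and then truncated to 00:00 in that
 * zone. Text values are parsed via {@code TimestampUtils}.
 */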
@Override
public java.sql.@Nullable Date getDate(
int i, java.util.@Nullable Calendar cal) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
if (cal == null) {
cal = getDefaultCalendar();
}
if (isBinary(i)) {
int col = i - 1;
int oid = fields[col].getOID();
TimeZone tz = cal.getTimeZone();
if (oid == Oid.DATE) {
return connection.getTimestampUtils().toDateBin(tz, value);
} else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
// If backend provides just TIMESTAMP, we use "cal" timezone
// If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
Timestamp timestamp = castNonNull(getTimestamp(i, cal));
// Here we just truncate date to 00:00 in a given time zone
return connection.getTimestampUtils().convertToDate(timestamp.getTime(), tz);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "date"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
return connection.getTimestampUtils().toDate(cal, castNonNull(getString(i)));
}
@Override
public @Nullable Time getTime(
int i, java.util.@Nullable Calendar cal) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
if (cal == null) {
cal = getDefaultCalendar();
}
if (isBinary(i)) {
int col = i - 1;
int oid = fields[col].getOID();
TimeZone tz = cal.getTimeZone();
if (oid == Oid.TIME || oid == Oid.TIMETZ) {
return connection.getTimestampUtils().toTimeBin(tz, value);
} else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
// If backend provides just TIMESTAMP, we use "cal" timezone
// If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
Timestamp timestamp = getTimestamp(i, cal);
if (timestamp == null) {
return null;
}
long timeMillis = timestamp.getTime();
if (oid == Oid.TIMESTAMPTZ) {
// time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC
// So we truncate days
return new Time(timeMillis % TimeUnit.DAYS.toMillis(1));
}
// Here we just truncate date part
return connection.getTimestampUtils().convertToTime(timeMillis, tz);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "time"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
String string = getString(i);
return connection.getTimestampUtils().toTime(cal, string);
}
//#if mvn.project.property.postgresql.jdbc.spec >= "JDBC4.2"
private java.time.@Nullable LocalTime getLocalTime(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
if (isBinary(i)) {
int col = i - 1;
int oid = fields[col].getOID();
if (oid == Oid.TIME) {
return connection.getTimestampUtils().toLocalTimeBin(value);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "time"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
String string = getString(i);
return connection.getTimestampUtils().toLocalTime(string);
}
//#endif
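/**
 * Binary TIMESTAMP values are interpreted in the calendar's time zone, while TIMESTAMPTZ values
 * carry an absolute instant and the calendar is effectively ignored. TIME and DATE columns are
 * also accepted, as the JDBC spec requires getTimestamp to support them.
 */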
@Pure
@Override
public @Nullable Timestamp getTimestamp(
int i, java.util.@Nullable Calendar cal) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
if (cal == null) {
cal = getDefaultCalendar();
}
int col = i - 1;
int oid = fields[col].getOID();
if (isBinary(i)) {
if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
boolean hasTimeZone = oid == Oid.TIMESTAMPTZ;
TimeZone tz = cal.getTimeZone();
return connection.getTimestampUtils().toTimestampBin(tz, value, hasTimeZone);
} else {
// JDBC spec says getTimestamp of Time and Date must be supported
long millis;
if (oid == Oid.TIME || oid == Oid.TIMETZ) {
Time time = getTime(i, cal);
if (time == null) {
return null;
}
millis = time.getTime();
} else if (oid == Oid.DATE) {
Date date = getDate(i, cal);
if (date == null) {
return null;
}
millis = date.getTime();
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "timestamp"),
PSQLState.DATA_TYPE_MISMATCH);
}
return new Timestamp(millis);
}
}
// If this is actually a timestamptz, the server-provided timezone will override
// the one we pass in, which is the desired behaviour. Otherwise, we'll
// interpret the timezone-less value in the provided timezone.
String string = castNonNull(getString(i));
if (oid == Oid.TIME || oid == Oid.TIMETZ) {
// If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
return new Timestamp(connection.getTimestampUtils().toTime(cal, string).getTime());
}
return connection.getTimestampUtils().toTimestamp(cal, string);
}
//#if mvn.project.property.postgresql.jdbc.spec >= "JDBC4.2"
private java.time.@Nullable OffsetDateTime getOffsetDateTime(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
int col = i - 1;
int oid = fields[col].getOID();
if (isBinary(i)) {
if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
return connection.getTimestampUtils().toOffsetDateTimeBin(value);
} else if (oid == Oid.TIMETZ) {
// JDBC spec says timetz must be supported
Time time = getTime(i);
if (time == null) {
return null;
}
return connection.getTimestampUtils().toOffsetDateTime(time);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "timestamptz"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
// If this is actually a timestamptz, the server-provided timezone will override
// the one we pass in, which is the desired behaviour. Otherwise, we'll
// interpret the timezone-less value in the provided timezone.
String string = castNonNull(getString(i));
if (oid == Oid.TIMETZ) {
// JDBC spec says timetz must be supported
// If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01
Calendar cal = getDefaultCalendar();
Time time = connection.getTimestampUtils().toTime(cal, string);
return connection.getTimestampUtils().toOffsetDateTime(time);
}
return connection.getTimestampUtils().toOffsetDateTime(string);
}
private java.time.@Nullable LocalDateTime getLocalDateTime(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
int col = i - 1;
int oid = fields[col].getOID();
if (oid != Oid.TIMESTAMP) {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "timestamp"),
PSQLState.DATA_TYPE_MISMATCH);
}
if (isBinary(i)) {
return connection.getTimestampUtils().toLocalDateTimeBin(value);
}
String string = castNonNull(getString(i));
return connection.getTimestampUtils().toLocalDateTime(string);
}
//#endif
public java.sql.@Nullable Date getDate(
String c, java.util.@Nullable Calendar cal) throws SQLException {
return getDate(findColumn(c), cal);
}
public @Nullable Time getTime(
String c, java.util.@Nullable Calendar cal) throws SQLException {
return getTime(findColumn(c), cal);
}
public @Nullable Timestamp getTimestamp(
String c, java.util.@Nullable Calendar cal) throws SQLException {
return getTimestamp(findColumn(c), cal);
}
public int getFetchDirection() throws SQLException {
checkClosed();
return fetchdirection;
}
public @Nullable Object getObjectImpl(
String columnName, @Nullable Map<String, Class<?>> map) throws SQLException {
return getObjectImpl(findColumn(columnName), map);
}
/*
* This checks against map for the type of column i, and if found returns an object based on that
* mapping. The class must implement the SQLData interface.
*/
public @Nullable Object getObjectImpl(
int i, @Nullable Map<String, Class<?>> map) throws SQLException {
checkClosed();
if (map == null || map.isEmpty()) {
return getObject(i);
}
throw org.postgresql.Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
}
public @Nullable Ref getRef(String columnName) throws SQLException {
return getRef(findColumn(columnName));
}
public @Nullable Ref getRef(int i) throws SQLException {
checkClosed();
// The backend doesn't yet have SQL3 REF types
throw org.postgresql.Driver.notImplemented(this.getClass(), "getRef(int)");
}
@Override
public int getRow() throws SQLException {
checkClosed();
if (onInsertRow) {
return 0;
}
final int rows_size = rows.size();
if (currentRow < 0 || currentRow >= rows_size) {
return 0;
}
return rowOffset + currentRow + 1;
}
// This one needs some thought, as not all ResultSets come from a statement
public Statement getStatement() throws SQLException {
checkClosed();
return statement;
}
public int getType() throws SQLException {
checkClosed();
return resultsettype;
}
@Pure
@Override
public boolean isAfterLast() throws SQLException {
checkClosed();
if (onInsertRow) {
return false;
}
castNonNull(rows, "rows");
final int rows_size = rows.size();
if (rowOffset + rows_size == 0) {
return false;
}
return (currentRow >= rows_size);
}
@Pure
@Override
public boolean isBeforeFirst() throws SQLException {
checkClosed();
if (onInsertRow) {
return false;
}
return ((rowOffset + currentRow) < 0 && !castNonNull(rows, "rows").isEmpty());
}
@Override
public boolean isFirst() throws SQLException {
checkClosed();
if (onInsertRow) {
return false;
}
final int rows_size = rows.size();
if (rowOffset + rows_size == 0) {
return false;
}
return ((rowOffset + currentRow) == 0);
}
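/**
 * Note: when rows are being fetched incrementally via a cursor, answering "is this the last
 * row?" may require fetching the next block from the backend; in that case the current row is
 * kept and becomes row 0 of the newly fetched block (see the comments below).
 */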
@Override
public boolean isLast() throws SQLException {
checkClosed();
if (onInsertRow) {
return false;
}
List<Tuple> rows = castNonNull(this.rows, "rows");
final int rows_size = rows.size();
if (rows_size == 0) {
return false; // No rows.
}
if (currentRow != (rows_size - 1)) {
return false; // Not on the last row of this block.
}
// We are on the last row of the current block.
ResultCursor cursor = this.cursor;
if (cursor == null) {
// This is the last block and therefore the last row.
return true;
}
if (maxRows > 0 && rowOffset + currentRow == maxRows) {
// We are implicitly limited by maxRows.
return true;
}
// Now the more painful case begins.
// We are on the last row of the current block, but we don't know if the
// current block is the last block; we must try to fetch some more data to
// find out.
// We do a fetch of the next block, then prepend the current row to that
// block (so currentRow == 0). This works as the current row
// must be the last row of the current block if we got this far.
rowOffset += rows_size - 1; // Discarding all but one row.
// Work out how many rows maxRows will let us fetch.
int fetchRows = fetchSize;
if (maxRows != 0) {
if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
// Fetch would exceed maxRows, limit it.
fetchRows = maxRows - rowOffset;
}
}
// Do the actual fetch.
connection.getQueryExecutor().fetch(cursor, new CursorResultHandler(), fetchRows);
rows = castNonNull(this.rows, "rows");
// Now prepend our one saved row and move to it.
rows.add(0, castNonNull(thisRow));
currentRow = 0;
// Finally, now we can tell if we're the last row or not.
return (rows.size() == 1);
}
@Override
public boolean last() throws SQLException {
checkScrollable();
List<Tuple> rows = castNonNull(this.rows, "rows");
final int rows_size = rows.size();
if (rows_size <= 0) {
return false;
}
currentRow = rows_size - 1;
initRowBuffer();
onInsertRow = false;
return true;
}
@Override
public boolean previous() throws SQLException {
checkScrollable();
if (onInsertRow) {
throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
PSQLState.INVALID_CURSOR_STATE);
}
if (currentRow - 1 < 0) {
currentRow = -1;
thisRow = null;
rowBuffer = null;
return false;
} else {
currentRow--;
}
initRowBuffer();
return true;
}
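/**
 * Moves the cursor {@code rows} positions relative to the current row; implemented on top of
 * {@link #absolute(int)}, so the same scrollability rules apply.
 */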
@Override
public boolean relative(int rows) throws SQLException {
checkScrollable();
if (onInsertRow) {
throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
PSQLState.INVALID_CURSOR_STATE);
}
// have to add 1 since absolute expects a 1-based index
int index = currentRow + 1 + rows;
if (index < 0) {
beforeFirst();
return false;
}
return absolute(index);
}
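/**
 * FETCH_FORWARD is always accepted; FETCH_REVERSE and FETCH_UNKNOWN additionally require a
 * scrollable result set, and any other value is rejected.
 */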
public void setFetchDirection(int direction) throws SQLException {
checkClosed();
switch (direction) {
case ResultSet.FETCH_FORWARD:
break;
case ResultSet.FETCH_REVERSE:
case ResultSet.FETCH_UNKNOWN:
checkScrollable();
break;
default:
throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
PSQLState.INVALID_PARAMETER_VALUE);
}
this.fetchdirection = direction;
}
public synchronized void cancelRowUpdates() throws SQLException {
checkClosed();
if (onInsertRow) {
throw new PSQLException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."),
PSQLState.INVALID_CURSOR_STATE);
}
if (doingUpdates) {
doingUpdates = false;
clearRowBuffer(true);
}
}
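/**
 * Deletes the current row by executing a parameterized {@code DELETE ... WHERE pk1 = ? and ...}
 * against the result set's underlying table. The prepared statement is built once and cached in
 * {@link #deleteStatement}; only the parameter values change between calls.
 */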
public synchronized void deleteRow() throws SQLException {
checkUpdateable();
if (onInsertRow) {
throw new PSQLException(GT.tr("Cannot call deleteRow() when on the insert row."),
PSQLState.INVALID_CURSOR_STATE);
}
if (isBeforeFirst()) {
throw new PSQLException(
GT.tr(
"Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."),
PSQLState.INVALID_CURSOR_STATE);
}
if (isAfterLast()) {
throw new PSQLException(
GT.tr(
"Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."),
PSQLState.INVALID_CURSOR_STATE);
}
List<Tuple> rows = castNonNull(this.rows, "rows");
if (rows.isEmpty()) {
throw new PSQLException(GT.tr("There are no rows in this ResultSet."),
PSQLState.INVALID_CURSOR_STATE);
}
List<PrimaryKey> primaryKeys = castNonNull(this.primaryKeys, "primaryKeys");
int numKeys = primaryKeys.size();
if (deleteStatement == null) {
StringBuilder deleteSQL =
new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where ");
for (int i = 0; i < numKeys; i++) {
Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name);
deleteSQL.append(" = ?");
if (i < numKeys - 1) {
deleteSQL.append(" and ");
}
}
deleteStatement = connection.prepareStatement(deleteSQL.toString());
}
deleteStatement.clearParameters();
for (int i = 0; i < numKeys; i++) {
deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue());
}
deleteStatement.executeUpdate();
rows.remove(currentRow);
currentRow--;
moveToCurrentRow();
}
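/**
 * Inserts the row assembled via the updater methods while on the insert row. The INSERT
 * statement is rebuilt on every call because the set of supplied columns may differ each time.
 */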
@Override
public synchronized void insertRow() throws SQLException {
checkUpdateable();
castNonNull(rows, "rows");
if (!onInsertRow) {
throw new PSQLException(GT.tr("Not on the insert row."), PSQLState.INVALID_CURSOR_STATE);
}
HashMap<String, Object> updateValues = this.updateValues;
if (updateValues == null || updateValues.isEmpty()) {
throw new PSQLException(GT.tr("You must specify at least one column value to insert a row."),
PSQLState.INVALID_PARAMETER_VALUE);
}
// Loop through the keys in the insertTable and create the SQL statement.
// We have to create the SQL every time since the user could insert
// different columns each time.
StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" (");
StringBuilder paramSQL = new StringBuilder(") values (");
Iterator<String> columnNames = updateValues.keySet().iterator();
int numColumns = updateValues.size();
for (int i = 0; columnNames.hasNext(); i++) {
String columnName = columnNames.next();
Utils.escapeIdentifier(insertSQL, columnName);
if (i < numColumns - 1) {
insertSQL.append(", ");
paramSQL.append("?,");
} else {
paramSQL.append("?)");
}
}
insertSQL.append(paramSQL.toString());
PreparedStatement insertStatement = null;
Tuple rowBuffer = castNonNull(this.rowBuffer);
try {
insertStatement = connection.prepareStatement(insertSQL.toString(), Statement.RETURN_GENERATED_KEYS);
Iterator