/*
* Copyright (c) 2004, PostgreSQL Global Development Group
* See the LICENSE file in the project root for more information.
*/
package io.mogdb.jdbc;
import io.mogdb.PGResultSetMetaData;
import io.mogdb.PGStatement;
import io.mogdb.core.BaseConnection;
import io.mogdb.core.BaseStatement;
import io.mogdb.core.Encoding;
import io.mogdb.core.Field;
import io.mogdb.core.Oid;
import io.mogdb.core.Query;
import io.mogdb.core.ResultCursor;
import io.mogdb.core.ResultHandlerBase;
import io.mogdb.core.TypeInfo;
import io.mogdb.core.Utils;
import io.mogdb.core.types.PGBlob;
import io.mogdb.core.types.PGClob;
import io.mogdb.util.ByteConverter;
import io.mogdb.util.GT;
import io.mogdb.util.HStoreConverter;
import io.mogdb.util.PGbytea;
import io.mogdb.util.PGobject;
import io.mogdb.util.PGtokenizer;
import io.mogdb.util.PSQLException;
import io.mogdb.util.PSQLState;
import java.io.ByteArrayInputStream;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.TimeZone;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
public class PgResultSet implements ResultSet, io.mogdb.PGRefCursorResultSet {
// needed for updateable result set support
private boolean updateable = false;
private boolean doingUpdates = false;
// Pending column-name -> new-value assignments consumed by updateRow()/insertRow().
// NOTE(review): the raw HashMap/List/Map types in these fields look like stripped
// generics (see e.g. primaryKeys.get(i).name in deleteRow) -- confirm against upstream.
private HashMap updateValues = null;
private boolean usingOID = false; // are we using the OID for the primary key?
private List primaryKeys; // list of primary keys
private boolean singleTable = false;
private String onlyTable = "";
private String tableName = null;
// Lazily prepared statements, cached and reused across row update/insert/delete calls.
private PreparedStatement updateStatement = null;
private PreparedStatement insertStatement = null;
private PreparedStatement deleteStatement = null;
private PreparedStatement selectStatement = null;
// Type/concurrency are fixed at construction; fetch direction is a mutable hint.
private final int resultsettype;
private final int resultsetconcurrency;
private int fetchdirection = ResultSet.FETCH_UNKNOWN;
private TimeZone defaultTimeZone;
protected final BaseConnection connection; // the connection we belong to
protected final BaseStatement statement; // the statement we belong to
protected final Field[] fields; // Field metadata for this resultset.
protected final Query originalQuery; // Query we originated from
protected final int maxRows; // Maximum rows in this resultset (might be 0).
protected final int maxFieldSize; // Maximum field size in this resultset (might be 0).
protected List rows; // Current page of results.
protected int current_row = -1; // Index into 'rows' of our current row (0-based)
protected int row_offset; // Offset of row 0 in the actual resultset
protected byte[][] this_row; // copy of the current result row
protected SQLWarning warnings = null; // The warning chain
/**
 * True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value
 * is always updated by the {@link #checkResultSet} method.
 */
protected boolean wasNullFlag = false;
protected boolean onInsertRow = false;
// are we on the insert row (for JDBC2 updatable resultsets)?
private byte[][] rowBuffer = null; // updateable rowbuffer
protected int fetchSize; // Current fetch size (might be 0).
protected ResultCursor cursor; // Cursor for fetching additional data.
private Map columnNameIndexMap; // Speed up findColumn by caching lookups
// Lazily created by getMetaData() and cached for the life of this result set.
private ResultSetMetaData rsMetaData;
/** Factory hook: subclasses may override to supply specialized metadata. */
protected ResultSetMetaData createMetaData() throws SQLException {
  final PgResultSetMetaData metaData = new PgResultSetMetaData(connection, fields);
  return metaData;
}
/** Returns the (lazily created, cached) metadata describing this result set's columns. */
public ResultSetMetaData getMetaData() throws SQLException {
  checkClosed();
  if (rsMetaData != null) {
    return rsMetaData;
  }
  rsMetaData = createMetaData();
  return rsMetaData;
}
/**
 * Builds a result set over an initial page of tuples.
 *
 * @param originalQuery query this result set originated from
 * @param statement statement that produced this result set; its connection is captured here
 * @param fields column metadata, one entry per result column (non-null)
 * @param tuples initial page of rows, transformed in place by clientLogicGetData (non-null)
 * @param cursor backend cursor for fetching more pages, or null if all rows are present
 * @param maxRows maximum number of rows (0 = unlimited)
 * @param maxFieldSize maximum field size (0 = unlimited)
 * @param rsType ResultSet type constant (e.g. TYPE_FORWARD_ONLY)
 * @param rsConcurrency ResultSet concurrency constant
 * @param rsHoldability holdability constant; not stored by this constructor
 * @throws SQLException if the statement's connection cannot be obtained
 */
PgResultSet(Query originalQuery, BaseStatement statement, Field[] fields, List tuples,
ResultCursor cursor, int maxRows, int maxFieldSize, int rsType, int rsConcurrency,
int rsHoldability) throws SQLException {
// Fail-fast on invalid null inputs
if (tuples == null) {
throw new NullPointerException("tuples must be non-null");
}
if (fields == null) {
throw new NullPointerException("fields must be non-null");
}
this.originalQuery = originalQuery;
this.connection = (BaseConnection) statement.getConnection();
this.statement = statement;
this.fields = fields;
this.rows = tuples;
// Decrypt client-logic columns in place before any row is exposed to the caller.
clientLogicGetData();
this.cursor = cursor;
this.maxRows = maxRows;
this.maxFieldSize = maxFieldSize;
this.resultsettype = rsType;
this.resultsetconcurrency = rsConcurrency;
}
// Lookup table mapping a nibble (0-15) to its uppercase hex digit; used by bytesArrayToHexString.
private static final char[] HEX_ARRAY = "0123456789ABCDEF".toCharArray();
/**
 * Helper for clientLogicGetData when the client-logic data is received in binary form.
 *
 * @param bytes raw bytes to render
 * @return uppercase hexadecimal representation of the binary data (two chars per byte)
 */
public static String bytesArrayToHexString(byte[] bytes) {
  StringBuilder hex = new StringBuilder(bytes.length * 2);
  for (byte b : bytes) {
    int unsigned = b & 0xFF;
    hex.append(HEX_ARRAY[unsigned >>> 4]);
    hex.append(HEX_ARRAY[unsigned & 0x0F]);
  }
  return hex.toString();
}
/**
 * This method is used to transform data that is client logic from client logic back to user input format.
 *
 * <p>Mutates {@code rows} in place: for every column whose OID is a client-logic type, each
 * non-null cell is decoded (hex form "\x..." for binary wire format, connection encoding for
 * text), run through {@link ClientLogic#runClientLogic}, and re-encoded. Errors are logged,
 * never thrown.
 */
private void clientLogicGetData() {
ClientLogic clientLogic = connection.getClientLogic();
// if client logic is off, no need to progress
if (clientLogic == null) {
return;
}
Encoding encoding = null;
try {
encoding = connection.getEncoding();
} catch (SQLException e1) {
// If we cannot get the encoding object, cannot move on.
connection.getLogger().error("client logic failed - could not get connection encoding");
return;
}
int fieldIndex = 0;
// Loop thru the list of fields
for (Field field : this.fields) {
if (ClientLogic.isClientLogicField(field.getOID())) {
for (int rowIndex = 0; rowIndex < rows.size(); ++rowIndex) {
try {
if (rows.get(rowIndex)[fieldIndex] != null) {
String clientLogicValue = "";
// The client logic fields may arrive as binary or as UTF-8.
// Need to find out what is it and act accordingly
if (field.getFormat() == Field.BINARY_FORMAT) {
clientLogicValue = "\\x" + bytesArrayToHexString(rows.get(rowIndex)[fieldIndex]);
} else {
clientLogicValue = encoding.decode(rows.get(rowIndex)[fieldIndex]);
}
String userInputValue = "";
try {
userInputValue = clientLogic.runClientLogic(clientLogicValue, field.getMod());
// Encode the data back the same way, so the field is now not binary
}
catch (ClientLogicException e) {
connection.getLogger().error("client logic failed for field:" + field.getColumnLabel() +
", value: " + clientLogicValue + " Error:" +
e.getErrorCode() + ":" + e.getErrorText());
}
// NOTE(review): if runClientLogic failed above, userInputValue is still "" and the
// original cell is overwritten with an empty string -- confirm this is intended.
rows.get(rowIndex)[fieldIndex] = encoding.encode(userInputValue);
}
}
catch (IOException e) {
connection.getLogger().error("client logic failed encoding on IOException for field:" + field.getColumnLabel());
}
}
}
// fieldIndex tracks the column position of 'field' within this.fields.
++fieldIndex;
}
}
/** Not supported by this driver: logs the attempt and always throws. */
public java.net.URL getURL(int columnIndex) throws SQLException {
  String traceMessage =
      "[" + connection.getSocketAddress() + "] " + " getURL columnIndex: " + columnIndex;
  connection.getLogger().trace(traceMessage);
  checkClosed();
  throw io.mogdb.Driver.notImplemented(this.getClass(), "getURL(int)");
}
/** Name-based variant: resolves the column label and delegates. */
public java.net.URL getURL(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getURL(columnIndex);
}
/**
 * Maps a column to a Java object according to its JDBC SQL type, with fallbacks for
 * backend-specific types ("unknown", "uuid", "refcursor", "hstore").
 *
 * @param columnIndex 1-based column index
 * @param field metadata for the column (used for the NUMERIC type modifier)
 * @return the converted value, or null for types the caller must handle itself
 * @throws SQLException on conversion or protocol errors
 */
protected Object internalGetObject(int columnIndex, Field field) throws SQLException {
switch (getSQLType(columnIndex)) {
case Types.BOOLEAN:
return getBoolean(columnIndex);
case Types.BIT:
return getBit(columnIndex);
case Types.SQLXML:
return getSQLXML(columnIndex);
case Types.TINYINT:
case Types.SMALLINT:
case Types.INTEGER:
return getInt(columnIndex);
case Types.BIGINT:
return getLong(columnIndex);
case Types.NUMERIC:
case Types.DECIMAL:
// Scale = low 16 bits of (typmod - 4), per PostgreSQL's numeric typmod encoding;
// -1 means "no explicit scale".
return getBigDecimal(columnIndex,
(field.getMod() == -1) ? -1 : ((field.getMod() - 4) & 0xffff));
case Types.REAL:
return getFloat(columnIndex);
case Types.FLOAT:
case Types.DOUBLE:
return getDouble(columnIndex);
case Types.CHAR:
case Types.VARCHAR:
case Types.LONGVARCHAR:
return getString(columnIndex);
case Types.DATE:
return getDate(columnIndex);
case Types.TIME:
return getTime(columnIndex);
case Types.TIMESTAMP:
return getTimestamp(columnIndex, null);
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
return getBytes(columnIndex);
case Types.ARRAY:
return getArray(columnIndex);
case Types.CLOB:
return getClob(columnIndex);
case Types.BLOB:
return getBlob(columnIndex);
default:
String type = getPGType(columnIndex);
// if the backend doesn't know the type then coerce to String
if (type.equals("unknown")) {
return getString(columnIndex);
}
if (type.equals("uuid")) {
if (isBinary(columnIndex)) {
return getUUID(this_row[columnIndex - 1]);
}
return getUUID(getString(columnIndex));
}
// Specialized support for ref cursors is neater.
if (type.equals("refcursor")) {
// Fetch all results.
String cursorName = getString(columnIndex);
StringBuilder sb = new StringBuilder("FETCH ALL IN ");
Utils.escapeIdentifier(sb, cursorName);
// nb: no BEGIN triggered here. This is fine. If someone
// committed, and the cursor was not holdable (closing the
// cursor), we avoid starting a new xact and promptly causing
// it to fail. If the cursor *was* holdable, we don't want a
// new xact anyway since holdable cursor state isn't affected
// by xact boundaries. If our caller didn't commit at all, or
// autocommit was on, then we wouldn't issue a BEGIN anyway.
//
// We take the scrollability from the statement, but until
// we have updatable cursors it must be readonly.
ResultSet rs =
connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY);
//
// In long running transactions these backend cursors take up memory space
// we could close in rs.close(), but if the transaction is closed before the result set,
// then
// the cursor no longer exists
sb.setLength(0);
sb.append("CLOSE ");
Utils.escapeIdentifier(sb, cursorName);
connection.execSQLUpdate(sb.toString());
((PgResultSet) rs).setRefCursor(cursorName);
return rs;
}
if ("hstore".equals(type)) {
if (isBinary(columnIndex)) {
return HStoreConverter.fromBytes(this_row[columnIndex - 1], connection.getEncoding());
}
return HStoreConverter.fromString(getString(columnIndex));
}
// Caller determines what to do (JDBC3 overrides in this case)
return null;
}
}
/** Guard used by positioning methods that require a scrollable (non-FORWARD_ONLY) result set. */
private void checkScrollable() throws SQLException {
  checkClosed();
  if (resultsettype != ResultSet.TYPE_FORWARD_ONLY) {
    return;
  }
  throw new PSQLException(
      GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."),
      PSQLState.INVALID_CURSOR_STATE);
}
/**
 * Moves to an absolute 1-based row position; negative values count back from the end.
 * Moving before the first row or past the last leaves the cursor before-first/after-last
 * respectively and returns false.
 */
public boolean absolute(int index) throws SQLException {
  checkScrollable();
  // JDBC positions are 1-based; 0 is defined to land before the first row.
  if (index == 0) {
    beforeFirst();
    return false;
  }
  final int rowCount = rows.size();
  int internalIndex;
  if (index > 0) {
    // Count from the front; a position past the end goes after-last.
    if (index > rowCount) {
      afterLast();
      return false;
    }
    internalIndex = index - 1;
  } else {
    // Count from the end; a position before the start goes before-first.
    if (index < -rowCount) {
      beforeFirst();
      return false;
    }
    internalIndex = rowCount + index;
  }
  current_row = internalIndex;
  initRowBuffer();
  onInsertRow = false;
  return true;
}
/** Positions the cursor after the final row (no-op position change on an empty page). */
public void afterLast() throws SQLException {
  checkScrollable();
  if (!rows.isEmpty()) {
    current_row = rows.size();
  }
  onInsertRow = false;
  this_row = null;
  rowBuffer = null;
}
/** Positions the cursor before the first row (no-op position change on an empty page). */
public void beforeFirst() throws SQLException {
  checkScrollable();
  if (rows.size() > 0) {
    current_row = -1;
  }
  onInsertRow = false;
  this_row = null;
  rowBuffer = null;
}
/** Moves to the first row; returns false when the result set is empty. */
public boolean first() throws SQLException {
  checkScrollable();
  if (rows.isEmpty()) {
    return false;
  }
  current_row = 0;
  initRowBuffer();
  onInsertRow = false;
  return true;
}
/** Name-based variant: resolves the column label and delegates. */
public java.sql.Array getArray(String colName) throws SQLException {
  int columnIndex = findColumn(colName);
  return getArray(columnIndex);
}
/** Factory hook for array values received in the binary wire format. */
protected Array makeArray(int oid, byte[] value) throws SQLException {
  PgArray array = new PgArray(connection, oid, value);
  return array;
}
/** Factory hook for array values received in the text wire format. */
protected Array makeArray(int oid, String value) throws SQLException {
  PgArray array = new PgArray(connection, oid, value);
  return array;
}
/** Returns column {@code i} as a SQL Array, or null for SQL NULL. */
public java.sql.Array getArray(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }
  final int oid = fields[i - 1].getOID();
  // Binary columns wrap the raw bytes; text columns go through getFixedString.
  if (isBinary(i)) {
    return makeArray(oid, this_row[i - 1]);
  }
  return makeArray(oid, getFixedString(i));
}
/** Returns the column as a BigDecimal without forcing a scale (-1 = unscaled). */
public java.math.BigDecimal getBigDecimal(int columnIndex) throws SQLException {
  return getBigDecimal(columnIndex, -1);
}
/** Name-based variant: resolves the column label and delegates. */
public java.math.BigDecimal getBigDecimal(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getBigDecimal(columnIndex);
}
/** Name-based variant: resolves the column label and delegates. */
public Blob getBlob(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getBlob(columnIndex);
}
/** Factory hook for large-object backed Blobs identified by OID. */
protected Blob makeBlob(long oid) throws SQLException {
  return new PgBlob(connection, oid);
}
/**
 * Decodes a hexadecimal string into its corresponding bytes.
 *
 * <p>Used by {@link #getBlob(int)} for BLOB columns whose content arrives as hex text.
 * A null, empty, or all-whitespace input yields an empty array. If the input has an odd
 * length, the final unpaired digit is ignored (preserving historical behavior).
 *
 * @param str hexadecimal text such as "0AFF"; case-insensitive
 * @return the decoded bytes, never null
 * @throws NumberFormatException if a decoded character is not a valid hex digit
 */
public byte[] toBytes(String str) {
  if (str == null || str.trim().isEmpty()) {
    return new byte[0];
  }
  // Decode via charAt/Character.digit instead of the previous per-pair substring +
  // Integer.parseInt, avoiding two small String allocations per output byte.
  int pairs = str.length() / 2;
  byte[] bytes = new byte[pairs];
  for (int i = 0; i < pairs; i++) {
    int hi = hexDigitValue(str.charAt(i * 2));
    int lo = hexDigitValue(str.charAt(i * 2 + 1));
    bytes[i] = (byte) ((hi << 4) | lo);
  }
  return bytes;
}
/** Converts one hex character to its numeric value, rejecting non-hex input. */
private static int hexDigitValue(char c) {
  int value = Character.digit(c, 16);
  if (value < 0) {
    throw new NumberFormatException("Invalid hex digit: " + c);
  }
  return value;
}
/**
 * Returns column {@code i} as a Blob, or null for SQL NULL.
 *
 * <p>Binary-format columns are wrapped directly; text-format bytea is unescaped via
 * {@link PGbytea#toBytes}, BLOB columns are decoded from their hex text form, and any
 * other OID is wrapped as-is. maxFieldSize trimming is applied where relevant.
 *
 * @param i 1-based column index
 * @throws SQLException on access or decoding errors
 */
public Blob getBlob(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }
  if (isBinary(i)) {
    PGBlob blob = new PGBlob();
    blob.setBytes(1, this_row[i - 1]);
    return blob;
  }
  int oid = this.fields[i - 1].getOID();
  byte[] byt;
  if (oid == Oid.BYTEA) {
    byt = trimBytes(i, PGbytea.toBytes(this_row[i - 1]));
  } else if (oid == Oid.BLOB) {
    // BUG FIX: previously called getString(1), always reading column 1 regardless of the
    // requested column; now decodes the hex text of the requested column i.
    byt = toBytes(getString(i));
  } else {
    byt = trimBytes(i, this_row[i - 1]);
  }
  PGBlob blob = new PGBlob();
  blob.setBytes(1, byt);
  return blob;
}
/** Name-based variant: resolves the column label and delegates. */
public java.io.Reader getCharacterStream(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getCharacterStream(columnIndex);
}
/**
 * Returns the column as a character stream, or null for SQL NULL.
 *
 * <p>PostgreSQL has no separate LONGVARCHAR type; with TOAST, plain text columns can hold
 * very large values, so this simply materializes the value via getString() -- there is
 * currently no way to stream the value from the server.
 */
public java.io.Reader getCharacterStream(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }
  String value = getString(i);
  return new CharArrayReader(value.toCharArray());
}
/** Name-based variant: resolves the column label and delegates. */
public Clob getClob(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getClob(columnIndex);
}
/** Factory hook for large-object backed Clobs identified by OID. */
protected Clob makeClob(long oid) throws SQLException {
  return new PgClob(connection, oid);
}
/** Returns the column as an in-memory Clob built from its text form, or null for SQL NULL. */
public Clob getClob(int i) throws SQLException {
  checkResultSet(i);
  if (wasNullFlag) {
    return null;
  }
  PGClob clob = new PGClob();
  clob.setString(1, getString(i));
  return clob;
}
/** Returns the concurrency mode fixed at construction time. */
public int getConcurrency() throws SQLException {
  checkClosed();
  return this.resultsetconcurrency;
}
/**
 * Returns column {@code i} as a java.sql.Date, interpreting timezone-less values in the
 * given calendar's zone (the default calendar when {@code cal} is null).
 *
 * @param i 1-based column index
 * @param cal calendar supplying the time zone, or null for the default
 * @return the date, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted to a date
 */
@Override
public java.sql.Date getDate(int i, java.util.Calendar cal) throws SQLException {
checkResultSet(i);
if (wasNullFlag) {
return null;
}
if (cal == null) {
cal = getDefaultCalendar();
}
if (isBinary(i)) {
int col = i - 1;
int oid = fields[col].getOID();
TimeZone tz = cal.getTimeZone();
if (oid == Oid.DATE) {
return connection.getTimestampUtils().toDateBin(tz, this_row[col]);
} else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
// If backend provides just TIMESTAMP, we use "cal" timezone
// If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
Timestamp timestamp = getTimestamp(i, cal);
// Here we just truncate date to 00:00 in a given time zone
return connection.getTimestampUtils().convertToDate(timestamp.getTime(), tz);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "date"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
// Text wire format: parse the string form in the caller's calendar.
return connection.getTimestampUtils().toDate(cal, getString(i));
}
/**
 * Returns column {@code i} as a java.sql.Time, interpreting timezone-less values in the
 * given calendar's zone (the default calendar when {@code cal} is null).
 *
 * @param i 1-based column index
 * @param cal calendar supplying the time zone, or null for the default
 * @return the time, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted to a time
 */
@Override
public Time getTime(int i, java.util.Calendar cal) throws SQLException {
checkResultSet(i);
if (wasNullFlag) {
return null;
}
if (cal == null) {
cal = getDefaultCalendar();
}
if (isBinary(i)) {
int col = i - 1;
int oid = fields[col].getOID();
TimeZone tz = cal.getTimeZone();
if (oid == Oid.TIME || oid == Oid.TIMETZ) {
return connection.getTimestampUtils().toTimeBin(tz, this_row[col]);
} else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
// If backend provides just TIMESTAMP, we use "cal" timezone
// If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
Timestamp timestamp = getTimestamp(i, cal);
long timeMillis = timestamp.getTime();
if (oid == Oid.TIMESTAMPTZ) {
// time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC
// So we truncate days
return new Time(timeMillis % TimeUnit.DAYS.toMillis(1));
}
// Here we just truncate date part
return connection.getTimestampUtils().convertToTime(timeMillis, tz);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "time"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
// Text wire format: parse the string form in the caller's calendar.
String string = getString(i);
return connection.getTimestampUtils().toTime(cal, string);
}
/**
 * Returns column {@code i} as a LocalTime, or null for SQL NULL.
 *
 * <p>Only the TIME oid is accepted in binary format; any other binary oid is a type
 * mismatch. Text values are parsed from their string form.
 *
 * @throws SQLException if the column type cannot be converted to a local time
 */
private java.time.LocalTime getLocalTime(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
if (isBinary(i)) {
int col = i - 1;
int oid = fields[col].getOID();
if (oid == Oid.TIME) {
return connection.getTimestampUtils().toLocalTimeBin(value);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "time"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
String string = getString(i);
return connection.getTimestampUtils().toLocalTime(string);
}
/**
 * Returns column {@code i} as a Timestamp, interpreting timezone-less values in the given
 * calendar's zone (the default calendar when {@code cal} is null). Per the JDBC spec,
 * TIME/TIMETZ/DATE columns are also convertible to Timestamp.
 *
 * @param i 1-based column index
 * @param cal calendar supplying the time zone, or null for the default
 * @return the timestamp, or null for SQL NULL
 * @throws SQLException if the column type cannot be converted to a timestamp
 */
@Override
public Timestamp getTimestamp(int i, java.util.Calendar cal) throws SQLException {
checkResultSet(i);
if (wasNullFlag) {
return null;
}
if (cal == null) {
cal = getDefaultCalendar();
}
int col = i - 1;
int oid = fields[col].getOID();
if (isBinary(i)) {
if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
boolean hasTimeZone = oid == Oid.TIMESTAMPTZ;
TimeZone tz = cal.getTimeZone();
return connection.getTimestampUtils().toTimestampBin(tz, this_row[col], hasTimeZone);
} else {
// JDBC spec says getTimestamp of Time and Date must be supported
long millis;
if (oid == Oid.TIME || oid == Oid.TIMETZ) {
millis = getTime(i, cal).getTime();
} else if (oid == Oid.DATE) {
millis = getDate(i, cal).getTime();
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "timestamp"),
PSQLState.DATA_TYPE_MISMATCH);
}
return new Timestamp(millis);
}
}
// If this is actually a timestamptz, the server-provided timezone will override
// the one we pass in, which is the desired behaviour. Otherwise, we'll
// interpret the timezone-less value in the provided timezone.
String string = getString(i);
if (oid == Oid.TIME || oid == Oid.TIMETZ) {
// If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
return new Timestamp(connection.getTimestampUtils().toTime(cal, string).getTime());
}
return connection.getTimestampUtils().toTimestamp(cal, string);
}
/**
 * Returns column {@code i} as an OffsetDateTime, or null for SQL NULL.
 *
 * <p>Accepts TIMESTAMP/TIMESTAMPTZ directly and, per the JDBC spec, TIMETZ (anchored at
 * 1970-01-01). Any other oid is a type mismatch.
 *
 * @throws SQLException if the column type cannot be converted
 */
private java.time.OffsetDateTime getOffsetDateTime(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
int col = i - 1;
int oid = fields[col].getOID();
if (isBinary(i)) {
if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
return connection.getTimestampUtils().toOffsetDateTimeBin(value);
} else if (oid == Oid.TIMETZ) {
// JDBC spec says timetz must be supported
Time time = getTime(i);
if (time == null) {
return null;
}
return connection.getTimestampUtils().toOffsetDateTime(time);
} else {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "timestamptz"),
PSQLState.DATA_TYPE_MISMATCH);
}
}
// If this is actually a timestamptz, the server-provided timezone will override
// the one we pass in, which is the desired behaviour. Otherwise, we'll
// interpret the timezone-less value in the provided timezone.
String string = getString(i);
if (oid == Oid.TIMETZ) {
// JDBC spec says timetz must be supported
// If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01
Calendar cal = getDefaultCalendar();
Time time = connection.getTimestampUtils().toTime(cal, string);
return connection.getTimestampUtils().toOffsetDateTime(time);
}
return connection.getTimestampUtils().toOffsetDateTime(string);
}
/**
 * Returns column {@code i} as a LocalDateTime, or null for SQL NULL.
 *
 * <p>Only the timezone-less TIMESTAMP oid is accepted (a LocalDateTime cannot represent
 * an absolute instant, so TIMESTAMPTZ is rejected too).
 *
 * @throws SQLException if the column is not a TIMESTAMP
 */
private java.time.LocalDateTime getLocalDateTime(int i) throws SQLException {
byte[] value = getRawValue(i);
if (value == null) {
return null;
}
int col = i - 1;
int oid = fields[col].getOID();
if (oid != Oid.TIMESTAMP) {
throw new PSQLException(
GT.tr("Cannot convert the column of type {0} to requested type {1}.",
Oid.toString(oid), "timestamp"),
PSQLState.DATA_TYPE_MISMATCH);
}
if (isBinary(i)) {
return connection.getTimestampUtils().toLocalDateTimeBin(value);
}
String string = getString(i);
return connection.getTimestampUtils().toLocalDateTime(string);
}
/** Name-based variants: resolve the column label and delegate to the index versions. */
public java.sql.Date getDate(String c, java.util.Calendar cal) throws SQLException {
  int columnIndex = findColumn(c);
  return getDate(columnIndex, cal);
}
public Time getTime(String c, java.util.Calendar cal) throws SQLException {
  int columnIndex = findColumn(c);
  return getTime(columnIndex, cal);
}
public Timestamp getTimestamp(String c, java.util.Calendar cal) throws SQLException {
  int columnIndex = findColumn(c);
  return getTimestamp(columnIndex, cal);
}
/** Returns the current fetch-direction hint (set via setFetchDirection). */
public int getFetchDirection() throws SQLException {
  checkClosed();
  return this.fetchdirection;
}
public Object getObjectImpl(String columnName, Map> map) throws SQLException {
return getObjectImpl(findColumn(columnName), map);
}
/*
* This checks against map for the type of column i, and if found returns an object based on that
* mapping. The class must implement the SQLData interface.
*/
public Object getObjectImpl(int i, Map> map) throws SQLException {
checkClosed();
if (map == null || map.isEmpty()) {
return getObject(i);
}
throw io.mogdb.Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
}
/** Name-based variant: resolves the column label and delegates. */
public Ref getRef(String columnName) throws SQLException {
  int columnIndex = findColumn(columnName);
  return getRef(columnIndex);
}
/** Always throws: SQL3 REF types are not supported by the backend. */
public Ref getRef(int i) throws SQLException {
  checkClosed();
  // The backend doesn't yet have SQL3 REF types
  throw io.mogdb.Driver.notImplemented(this.getClass(), "getRef(int)");
}
/** Returns the absolute 1-based row number, or 0 on the insert row / off-row positions. */
public int getRow() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return 0;
  }
  boolean onValidRow = current_row >= 0 && current_row < rows.size();
  if (!onValidRow) {
    return 0;
  }
  // Translate the block-local index into an absolute, 1-based row number.
  return row_offset + current_row + 1;
}
// This one needs some thought, as not all ResultSets come from a statement
/** Returns the statement that produced this result set. */
public Statement getStatement() throws SQLException {
  checkClosed();
  return this.statement;
}
/** Returns the scrollability type fixed at construction time. */
public int getType() throws SQLException {
  checkClosed();
  return this.resultsettype;
}
/** True when positioned after the final row; false on the insert row or an empty set. */
public boolean isAfterLast() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  final int rowCount = rows.size();
  // An entirely empty result set is never "after last".
  if (row_offset + rowCount == 0) {
    return false;
  }
  return current_row >= rowCount;
}
/** True when positioned before the first row of a non-empty set; false on the insert row. */
public boolean isBeforeFirst() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  boolean hasRows = !rows.isEmpty();
  return hasRows && (row_offset + current_row) < 0;
}
/** True when positioned on the very first row; false on the insert row or an empty set. */
public boolean isFirst() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    return false;
  }
  // An entirely empty result set has no first row.
  if (row_offset + rows.size() == 0) {
    return false;
  }
  // The first row is absolute position 0.
  return row_offset + current_row == 0;
}
/**
 * True when positioned on the final row. When a backend cursor is still open this may have
 * to fetch the next block to find out, mutating {@code rows}/{@code row_offset}/
 * {@code current_row} as a side effect (the current row is preserved at index 0).
 *
 * @throws SQLException on fetch errors
 */
public boolean isLast() throws SQLException {
checkClosed();
if (onInsertRow) {
return false;
}
final int rows_size = rows.size();
if (rows_size == 0) {
return false; // No rows.
}
if (current_row != (rows_size - 1)) {
return false; // Not on the last row of this block.
}
// We are on the last row of the current block.
if (cursor == null) {
// This is the last block and therefore the last row.
return true;
}
if (maxRows > 0 && row_offset + current_row == maxRows) {
// We are implicitly limited by maxRows.
return true;
}
// Now the more painful case begins.
// We are on the last row of the current block, but we don't know if the
// current block is the last block; we must try to fetch some more data to
// find out.
// We do a fetch of the next block, then prepend the current row to that
// block (so current_row == 0). This works as the current row
// must be the last row of the current block if we got this far.
row_offset += rows_size - 1; // Discarding all but one row.
// Work out how many rows maxRows will let us fetch.
int fetchRows = fetchSize;
if (maxRows != 0) {
if (fetchRows == 0 || row_offset + fetchRows > maxRows) {
// Fetch would exceed maxRows, limit it.
fetchRows = maxRows - row_offset;
}
}
// Do the actual fetch.
connection.getQueryExecutor().fetch(cursor, new CursorResultHandler(), fetchRows);
// Now prepend our one saved row and move to it.
rows.add(0, this_row);
current_row = 0;
// Finally, now we can tell if we're the last row or not.
return (rows.size() == 1);
}
/** Moves to the final row of the current page; returns false when the set is empty. */
public boolean last() throws SQLException {
  checkScrollable();
  final int rowCount = rows.size();
  if (rowCount == 0) {
    return false;
  }
  current_row = rowCount - 1;
  initRowBuffer();
  onInsertRow = false;
  return true;
}
/** Moves back one row; lands before-first (returning false) when already on the first row. */
public boolean previous() throws SQLException {
  checkScrollable();
  if (onInsertRow) {
    throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
        PSQLState.INVALID_CURSOR_STATE);
  }
  // Stepping back from row 0 (or from before-first) leaves the cursor before-first.
  if (current_row <= 0) {
    current_row = -1;
    this_row = null;
    rowBuffer = null;
    return false;
  }
  current_row--;
  initRowBuffer();
  return true;
}
/** Moves {@code rows} positions relative to the current one (delegates to absolute). */
public boolean relative(int rows) throws SQLException {
  checkScrollable();
  if (onInsertRow) {
    throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
        PSQLState.INVALID_CURSOR_STATE);
  }
  // absolute() expects a 1-based position, hence the +1.
  int target = current_row + 1 + rows;
  return absolute(target);
}
/**
 * Records the fetch-direction hint. REVERSE/UNKNOWN require a scrollable result set;
 * anything other than the three JDBC constants is rejected.
 */
public void setFetchDirection(int direction) throws SQLException {
  checkClosed();
  if (direction == ResultSet.FETCH_REVERSE || direction == ResultSet.FETCH_UNKNOWN) {
    // Only meaningful on a scrollable result set.
    checkScrollable();
  } else if (direction != ResultSet.FETCH_FORWARD) {
    throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
        PSQLState.INVALID_PARAMETER_VALUE);
  }
  this.fetchdirection = direction;
}
/** Discards pending updateXXX() changes for the current row; illegal on the insert row. */
public synchronized void cancelRowUpdates() throws SQLException {
  checkClosed();
  if (onInsertRow) {
    throw new PSQLException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."),
        PSQLState.INVALID_CURSOR_STATE);
  }
  if (!doingUpdates) {
    return;
  }
  // Abandon the pending column values and restore the row buffer.
  doingUpdates = false;
  clearRowBuffer(true);
}
/**
 * Deletes the current row from the underlying table using the cached primary keys, then
 * removes it from the in-memory page and repositions the cursor.
 *
 * <p>The DELETE statement is built once (keyed on all primary-key columns) and cached in
 * {@code deleteStatement} for reuse.
 *
 * @throws SQLException if the result set is not updateable, the cursor is on the insert
 *         row / before-first / after-last, or the set is empty
 */
public synchronized void deleteRow() throws SQLException {
checkUpdateable();
if (onInsertRow) {
throw new PSQLException(GT.tr("Cannot call deleteRow() when on the insert row."),
PSQLState.INVALID_CURSOR_STATE);
}
if (isBeforeFirst()) {
throw new PSQLException(
GT.tr(
"Currently positioned before the start of the ResultSet. You cannot call deleteRow() here."),
PSQLState.INVALID_CURSOR_STATE);
}
if (isAfterLast()) {
throw new PSQLException(
GT.tr(
"Currently positioned after the end of the ResultSet. You cannot call deleteRow() here."),
PSQLState.INVALID_CURSOR_STATE);
}
if (rows.isEmpty()) {
throw new PSQLException(GT.tr("There are no rows in this ResultSet."),
PSQLState.INVALID_CURSOR_STATE);
}
int numKeys = primaryKeys.size();
if (deleteStatement == null) {
// Build "DELETE FROM <table> where pk1 = ? and pk2 = ? ..." once and cache it.
// NOTE(review): primaryKeys.get(i).name relies on the element type (a primary-key
// holder class); the raw List here suggests stripped generics -- confirm upstream.
StringBuilder deleteSQL =
new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where ");
for (int i = 0; i < numKeys; i++) {
Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name);
deleteSQL.append(" = ?");
if (i < numKeys - 1) {
deleteSQL.append(" and ");
}
}
deleteStatement = connection.prepareStatement(deleteSQL.toString());
}
deleteStatement.clearParameters();
for (int i = 0; i < numKeys; i++) {
deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue());
}
deleteStatement.executeUpdate();
// Drop the row locally and step back so the cursor lands on a valid position.
rows.remove(current_row);
current_row--;
moveToCurrentRow();
}
public synchronized void insertRow() throws SQLException {
checkUpdateable();
if (!onInsertRow) {
throw new PSQLException(GT.tr("Not on the insert row."), PSQLState.INVALID_CURSOR_STATE);
} else if (updateValues.isEmpty()) {
throw new PSQLException(GT.tr("You must specify at least one column value to insert a row."),
PSQLState.INVALID_PARAMETER_VALUE);
} else {
// loop through the keys in the insertTable and create the sql statement
// we have to create the sql every time since the user could insert different
// columns each time
StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" (");
StringBuilder paramSQL = new StringBuilder(") values (");
Iterator columnNames = updateValues.keySet().iterator();
int numColumns = updateValues.size();
for (int i = 0; columnNames.hasNext(); i++) {
String columnName = columnNames.next();
Utils.escapeIdentifier(insertSQL, columnName);
if (i < numColumns - 1) {
insertSQL.append(", ");
paramSQL.append("?,");
} else {
paramSQL.append("?)");
}
}
insertSQL.append(paramSQL.toString());
insertStatement = connection.prepareStatement(insertSQL.toString());
Iterator