net.java.ao.DatabaseProvider

This is the core library for Active Objects. It is generic and can be embedded in any environment; as such, it does not bundle connection pooling or other environment-specific infrastructure.

/*
 * Copyright 2007 Daniel Spiewak
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"); 
 * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 * 
 *	    http://www.apache.org/licenses/LICENSE-2.0 
 * 
 * Unless required by applicable law or agreed to in writing, software 
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.java.ao;

import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import net.java.ao.schema.IndexNameConverter;
import net.java.ao.schema.NameConverters;
import net.java.ao.schema.OnUpdate;
import net.java.ao.schema.SequenceNameConverter;
import net.java.ao.schema.TableNameConverter;
import net.java.ao.schema.TriggerNameConverter;
import net.java.ao.schema.ddl.DDLAction;
import net.java.ao.schema.ddl.DDLActionType;
import net.java.ao.schema.ddl.DDLField;
import net.java.ao.schema.ddl.DDLForeignKey;
import net.java.ao.schema.ddl.DDLIndex;
import net.java.ao.schema.ddl.DDLTable;
import net.java.ao.schema.ddl.DDLValue;
import net.java.ao.schema.ddl.SchemaReader;
import net.java.ao.sql.SqlUtils;
import net.java.ao.types.DatabaseType;
import net.java.ao.types.TypeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.InputStream;
import java.sql.Blob;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

import static com.google.common.base.Preconditions.*;
import static net.java.ao.Common.*;

/**
 * <p>The superclass parent of all <code>DatabaseProvider</code>
 * implementations. Various implementations allow for an abstraction
 * around database-specific functionality (such as DDL). DatabaseProvider(s)
 * also handle the creation of new {@link Connection} instances and
 * fully encapsulate the raw JDBC driver. Any database-specific
 * code should be placed in the database provider, rather than embedded
 * within the API logic.</p>
 *
 * <p>This superclass contains a base-line, default implementation of most
 * database-specific methods, thus requiring a minimum of work to implement
 * a new database provider. For the sake of sanity (read: mine), this
 * base-line implementation is basically specific to MySQL. Thus any
 * DatabaseProvider implementations are really specifying the
 * differences between the database in question and MySQL. To fully
 * utilize the default implementations provided in this class, this fact should
 * be kept in mind.</p>
 *
 * <p>This class also handles the implementation details required to ensure
 * that only one active {@link Connection} instance is available per thread. This is
 * in fact a very basic (and naive) form of connection pooling. It should
 * not be relied upon for performance reasons. Instead, one should enable
 * connection pooling via the {@link javax.sql.DataSource} passed to instances. You can
 * also use the net.java.ao.builder.EntityManagerBuilder builder to make it
 * easier to configure the pooling. The purpose of the thread-locked connection pooling
 * in this class is to satisfy transactions with external SQL statements.</p>
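 *
 * <p>For illustration only (this example is not part of the original documentation),
 * a minimal sketch of how the thread-bound transaction methods are intended to be
 * used, assuming a concrete provider instance obtained elsewhere:</p>
 *
 * <pre>{@code
 * Connection conn = provider.startTransaction();
 * try {
 *     // arbitrary JDBC work on this thread; getConnection() returns the same instance
 *     provider.commitTransaction(conn);
 * } catch (SQLException e) {
 *     provider.rollbackTransaction(conn);
 * }
 * }</pre>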

 * @author Daniel Spiewak
 */
public abstract class DatabaseProvider {

    protected final Logger logger = LoggerFactory.getLogger(this.getClass());
    protected final Logger sqlLogger = LoggerFactory.getLogger("net.java.ao.sql");

    private final Set sqlListeners;
    private final ThreadLocal transactionThreadLocal = new ThreadLocal();
    private final DisposableDataSource dataSource;
    private final String schema;

    private String quote;

    protected DatabaseProvider(DisposableDataSource dataSource, String schema) {
        this.dataSource = checkNotNull(dataSource);
        this.schema = isBlank(schema) ? null : schema; // can be null

        this.sqlListeners = new CopyOnWriteArraySet();
        this.sqlListeners.add(new LoggingSqlListener(sqlLogger));

        loadQuoteString();
    }

    public String getSchema() {
        return schema;
    }

    private synchronized void loadQuoteString() {
        if (quote != null) {
            return;
        }

        Connection conn = null;
        try {
            conn = getConnection();
            if (conn == null) {
                throw new IllegalStateException("Could not get connection to load quote String");
            }
            quote = conn.getMetaData().getIdentifierQuoteString().trim();
        } catch (SQLException e) {
            throw new RuntimeException("Unable to query the database", e);
        } finally {
            closeQuietly(conn);
        }
    }

/**

 * Generates the DDL fragment required to specify an INTEGER field as
 * auto-incremented. For databases which do not support such flags (which
 * is just about every database except MySQL), "" is an
 * acceptable return value. This method should never return null,
 * as that would cause the field rendering method to throw a {@link NullPointerException}.
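 *
 * <p>For illustration only (not from the original documentation), a provider for a
 * database without such a flag could simply override this method to return the
 * empty string:</p>
 *
 * <pre>{@code
 * @Override
 * protected String renderAutoIncrement() {
 *     return "";
 * }
 * }</pre>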

*/ protected String renderAutoIncrement() { return "AUTO_INCREMENT"; } /** * Top level delegating method for the process of rendering a database-agnostic * {@link DDLAction} into the database-specific DDL statement(s). It is * doubtful that any implementations will have to override this method as the * default implementation is database-agnostic. * * @param nameConverters * @param action The database-agnostic action to render. * @return An array of DDL statements specific to the database in question. * @see #renderTable(DDLTable) * @see #renderFunctions(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable) * @see #renderTriggers(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable) * @see #renderSequences(net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable) * @see #renderDropTriggers(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable) * @see #renderDropFunctions(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable) * @see #renderDropSequences(net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable) * @see #renderDropTable(DDLTable) * @see #renderAlterTableAddColumn(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #renderAlterTableChangeColumn(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField, net.java.ao.schema.ddl.DDLField) * @see #renderAlterTableDropColumn(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #renderAlterTableAddKey(DDLForeignKey) * @see #renderAlterTableDropKey(DDLForeignKey) */ public final String[] renderAction(NameConverters nameConverters, DDLAction action) { final List back = new ArrayList(); switch (action.getActionType()) { case CREATE: back.add(renderTable(action.getTable())); back.addAll(renderFunctions(nameConverters.getTriggerNameConverter(), action.getTable())); back.addAll(renderSequences(nameConverters.getSequenceNameConverter(), action.getTable())); back.addAll(renderTriggers(nameConverters.getTriggerNameConverter(), nameConverters.getSequenceNameConverter(), action.getTable())); for (DDLIndex index : action.getTable().getIndexes()) { DDLAction newAction = new DDLAction(DDLActionType.CREATE_INDEX); newAction.setIndex(index); back.addAll(Arrays.asList(renderAction(nameConverters, newAction))); } break; case DROP: for (DDLIndex index : action.getTable().getIndexes()) { DDLAction newAction = new DDLAction(DDLActionType.DROP_INDEX); newAction.setIndex(index); back.addAll(Arrays.asList(renderAction(nameConverters, newAction))); } back.addAll(renderDropTriggers(nameConverters.getTriggerNameConverter(), action.getTable())); back.addAll(renderDropSequences(nameConverters.getSequenceNameConverter(), action.getTable())); back.addAll(Arrays.asList(renderDropFunctions(nameConverters.getTriggerNameConverter(), action.getTable()))); back.add(renderDropTable(action.getTable())); break; case ALTER_ADD_COLUMN: back.addAll(Arrays.asList(renderAlterTableAddColumn(nameConverters.getTriggerNameConverter(), nameConverters.getSequenceNameConverter(), action.getTable(), action.getField()))); for (DDLIndex index : action.getTable().getIndexes()) { if (index.getField().equals(action.getField().getName())) { DDLAction newAction = new 
DDLAction(DDLActionType.CREATE_INDEX); newAction.setIndex(index); back.addAll(Arrays.asList(renderAction(nameConverters, newAction))); } } break; case ALTER_CHANGE_COLUMN: back.addAll(renderAlterTableChangeColumn(nameConverters.getTriggerNameConverter(), nameConverters.getSequenceNameConverter(), action.getTable(), action.getOldField(), action.getField())); break; case ALTER_DROP_COLUMN: for (DDLIndex index : action.getTable().getIndexes()) { if (index.getField().equals(action.getField().getName())) { DDLAction newAction = new DDLAction(DDLActionType.DROP_INDEX); newAction.setIndex(index); back.addAll(Arrays.asList(renderAction(nameConverters, newAction))); } } back.addAll(Arrays.asList(renderAlterTableDropColumn(nameConverters.getTriggerNameConverter(), action.getTable(), action.getField()))); break; case ALTER_ADD_KEY: back.add(renderAlterTableAddKey(action.getKey())); break; case ALTER_DROP_KEY: back.add(renderAlterTableDropKey(action.getKey())); break; case CREATE_INDEX: back.add(renderCreateIndex(nameConverters.getIndexNameConverter(), action.getIndex())); break; case DROP_INDEX: back.add(renderDropIndex(nameConverters.getIndexNameConverter(), action.getIndex())); break; case INSERT: back.add(renderInsert(action.getTable(), action.getValues())); } return back.toArray(new String[back.size()]); } /** *

 * <p>Top level delegating method for rendering a database-agnostic
 * {@link Query} object into its (potentially) database-specific
 * query statement. This method invokes the various <code>renderQuery*</code>
 * methods to construct its output, thus it is doubtful that any subclasses
 * will have to override it. Rather, one of the delegate methods
 * should be considered.</p>
 *
 * <p>An example of a database-specific query rendering would be the
 * following Query:</p>
 *
 * <pre>Query.select().from(Person.class).limit(10)</pre>
 *
 * <p>On MySQL, this would render to <code>SELECT id FROM people LIMIT 10</code>.
 * However, on SQL Server, this same Query would render as
 * <code>SELECT TOP 10 id FROM people</code>.</p>

* * @param query The database-agnostic Query object to be rendered in a * potentially database-specific way. * @param converter Used to convert {@link Entity} classes into table names. * @param count If true, render the Query as a SELECT COUNT(*) * rather than a standard field-data query. * @return A syntactically complete SQL statement potentially specific to the * database. * @see #renderQuerySelect(Query, TableNameConverter, boolean) * @see #renderQueryJoins(Query, TableNameConverter) * @see #renderQueryWhere(Query) * @see #renderQueryGroupBy(Query) * @see #renderQueryOrderBy(Query) * @see #renderQueryLimit(Query) */ public String renderQuery(Query query, TableNameConverter converter, boolean count) { StringBuilder sql = new StringBuilder(); sql.append(renderQuerySelect(query, converter, count)); sql.append(renderQueryJoins(query, converter)); sql.append(renderQueryWhere(query)); sql.append(renderQueryGroupBy(query)); sql.append(renderQueryOrderBy(query)); sql.append(renderQueryLimit(query)); return sql.toString(); } /** *

 * <p>Parses the database-agnostic String value relevant to the specified SQL
 * type in int form (as defined by {@link Types}) and returns
 * the Java value which corresponds. This method is completely database-agnostic, as are
 * all of its delegate methods.</p>
 *
 * <p><b>WARNING:</b> This method is being considered for removal to another
 * class (perhaps {@link TypeManager}?) as it is not a database-specific function and thus
 * confuses the purpose of this class. Do not rely upon it heavily. (Better yet, don't rely on it
 * at all from external code; it's not designed to be part of the public API.)</p>
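 *
 * <p>For illustration only (not from the original documentation), the default
 * implementation behaves roughly as follows:</p>
 *
 * <pre>{@code
 * parseValue(Types.INTEGER, "42");    // Integer 42
 * parseValue(Types.DOUBLE, "3.14");   // Double 3.14
 * parseValue(Types.VARCHAR, "NULL");  // null
 * }</pre>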

* * @param type The JDBC integer type of the database field against which to parse the * value. * @param value The database-agnostic String value to parse into a proper Java object * with respect to the specified SQL type. * @return A Java value which corresponds to the specified String. */ public Object parseValue(int type, String value) { if (value == null || value.equals("NULL")) { return null; } List posFuncs = new ArrayList(); for (DatabaseFunction func : DatabaseFunction.values()) { if (renderFunction(func).equalsIgnoreCase(value)) { posFuncs.add(func); } } if (posFuncs.size() > 0) { return posFuncs.toArray(new DatabaseFunction[posFuncs.size()]); } try { switch (type) { case Types.BIGINT: return Long.parseLong(value); case Types.BIT: return Byte.parseByte(value); case Types.BOOLEAN: int intValue = -1; try { intValue = Integer.parseInt(value); } catch (Throwable t) { return Boolean.parseBoolean(value); } return intValue == 0; case Types.CHAR: value.charAt(0); break; case Types.DATE: try { Calendar back = Calendar.getInstance(); back.setTime(new SimpleDateFormat(getDateFormat()).parse(value)); return back; } catch (ParseException e) { return null; } case Types.DECIMAL: return Double.parseDouble(value); case Types.DOUBLE: return Double.parseDouble(value); case Types.FLOAT: return Float.parseFloat(value); case Types.INTEGER: return Integer.parseInt(value); case Types.NUMERIC: return Integer.parseInt(value); case Types.REAL: return Double.parseDouble(value); case Types.SMALLINT: return Short.parseShort(value); case Types.TIMESTAMP: try { Calendar back = Calendar.getInstance(); back.setTime(new SimpleDateFormat(getDateFormat()).parse(value)); return back; } catch (ParseException e) { return null; } case Types.TINYINT: return Short.parseShort(value); case Types.VARCHAR: return value; } } catch (Throwable t) { } return null; } /** *

 * <p>Allows the provider to set database-specific options on a
 * {@link Statement} instance prior to its usage in a SELECT
 * query. This is to allow things like emulation of the
 * LIMIT feature on databases which don't support it within
 * the SQL implementation.</p>
 *
 * <p>This method is only called on SELECTs.</p>
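 *
 * <p>A sketch of a possible override (illustrative only) that emulates LIMIT through
 * the JDBC statement rather than the rendered SQL:</p>
 *
 * <pre>{@code
 * @Override
 * public void setQueryStatementProperties(Statement stmt, Query query) throws SQLException {
 *     int limit = query.getLimit();
 *     if (limit >= 0) {
 *         stmt.setMaxRows(limit);
 *     }
 * }
 * }</pre>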

* * @param stmt The instance against which the properties * should be set. * @param query The query which is being executed against * the statement instance. */ public void setQueryStatementProperties(Statement stmt, Query query) throws SQLException { } /** * Allows the provider to set database-specific options on a * {@link ResultSet} instance prior to its use by the library. * This allows for features such as row offsetting even on * databases that don't support it (such as Oracle, Derby, * etc). * * @param res The ResultSet to modify. * @param query The query instance which was run to produce * the result set. */ public void setQueryResultSetProperties(ResultSet res, Query query) throws SQLException { } /** *

 * Returns a result set of all of the tables (and associated
 * metadata) in the database. The fields of the result set must
 * correspond with those specified in the
 * {@link DatabaseMetaData#getTables(String, String, String, String[])}
 * method. In fact, the default implementation merely calls
 * this method, passing <code>(null, schema, "", new String[]{"TABLE"})</code>.
 * For databases (such as PostgreSQL) where this is unsuitable,
 * different parameters can be specified to the <code>getTables</code>
 * method in the override, or an entirely new implementation
 * written, as long as the result set corresponds in fields to
 * the JDBC spec.
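 *
 * <p>A sketch of an override using different parameters (illustrative only; the
 * schema name is hypothetical):</p>
 *
 * <pre>{@code
 * @Override
 * public ResultSet getTables(Connection conn) throws SQLException {
 *     return conn.getMetaData().getTables(null, "public", "%", new String[] {"TABLE"});
 * }
 * }</pre>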

* * @param conn The connection to use in retrieving the database tables. * @return A result set of tables (and meta) corresponding in fields * to the JDBC specification. * @see java.sql.DatabaseMetaData#getTables(String, String, String, String[]) */ public ResultSet getTables(Connection conn) throws SQLException { return conn.getMetaData().getTables(null, schema, "", new String[]{"TABLE"}); } public ResultSet getSequences(Connection conn) throws SQLException { return conn.getMetaData().getTables(null, schema, "", new String[]{"SEQUENCE"}); } public ResultSet getImportedKeys(Connection connection, String tableName) throws SQLException { return connection.getMetaData().getImportedKeys(null, schema, tableName); } /** *

 * <p>Renders the SELECT portion of a given {@link Query} instance in the
 * manner required by the database-specific SQL implementation. Usually,
 * this is as simple as <code>"SELECT id FROM table"</code> or <code>"SELECT DISTINCT
 * * FROM table"</code>. However, some databases require the limit and offset
 * parameters to be specified as part of the SELECT clause. For example,
 * on HSQLDB, a Query for the "id" field limited to 10 rows would render
 * SELECT like this: <code>SELECT TOP 10 id FROM table</code>.</p>
 *
 * <p>There is usually no need to call this method directly. Under normal
 * operations it functions as a delegate for {@link #renderQuery(Query, TableNameConverter, boolean)}.</p>

* * @param query The Query instance from which to determine the SELECT properties. * @param converter The name converter to allow conversion of the query entity * interface into a proper table name. * @param count Whether or not the query should be rendered as a SELECT COUNT(*). * @return The database-specific SQL rendering of the SELECT portion of the query. */ protected String renderQuerySelect(Query query, TableNameConverter converter, boolean count) { StringBuilder sql = new StringBuilder(); String tableName = query.getTable(); if (tableName == null) { tableName = converter.getName(query.getTableType()); } switch (query.getType()) { case SELECT: sql.append("SELECT "); if (query.isDistinct()) { sql.append("DISTINCT "); } if (count) { sql.append("COUNT(*)"); } else { StringBuilder fields = new StringBuilder(); for (String field : query.getFields()) { fields.append(processID(field)).append(','); } if (query.getFields().length > 0) { fields.setLength(fields.length() - 1); } sql.append(fields); } sql.append(" FROM "); sql.append(withSchema(tableName)); break; } return sql.toString(); } /** *

 * <p>Renders the JOIN portion of the query in the database-specific SQL
 * dialect. Very few databases deviate from the standard in this matter,
 * thus the default implementation is usually sufficient.</p>
 *
 * <p>An example return value: <code>" JOIN table1 ON table.id = table1.value"</code></p>
 *
 * <p>There is usually no need to call this method directly. Under normal
 * operations it functions as a delegate for {@link #renderQuery(Query, TableNameConverter, boolean)}.</p>

* * @param query The Query instance from which to determine the JOIN properties. * @param converter The name converter to allow conversion of the query entity * interface into a proper table name. * @return The database-specific SQL rendering of the JOIN portion of the query. */ protected String renderQueryJoins(Query query, TableNameConverter converter) { final StringBuilder sql = new StringBuilder(); for (Map.Entry>, String> joinEntry : query.getJoins().entrySet()) { sql.append(" JOIN ").append(withSchema(converter.getName(joinEntry.getKey()))); if (joinEntry.getValue() != null) { sql.append(" ON ").append(processOnClause(joinEntry.getValue())); } } return sql.toString(); } /** *

 * <p>Renders the WHERE portion of the query in the database-specific SQL
 * dialect. Very few databases deviate from the standard in this matter,
 * thus the default implementation is usually sufficient.</p>
 *
 * <p>An example return value: <code>" WHERE name = ? OR age < 20"</code></p>
 *
 * <p>There is usually no need to call this method directly. Under normal
 * operations it functions as a delegate for {@link #renderQuery(Query, TableNameConverter, boolean)}.</p>

* * @param query The Query instance from which to determine the WHERE properties. * @return The database-specific SQL rendering of the WHERE portion of the query. */ protected String renderQueryWhere(Query query) { StringBuilder sql = new StringBuilder(); String whereClause = query.getWhereClause(); if (whereClause != null) { sql.append(" WHERE "); sql.append(processWhereClause(whereClause)); } return sql.toString(); } /** *

 * <p>Renders the GROUP BY portion of the query in the database-specific SQL
 * dialect. Very few databases deviate from the standard in this matter,
 * thus the default implementation is usually sufficient.</p>
 *
 * <p>An example return value: <code>" GROUP BY name"</code></p>
 *
 * <p>There is usually no need to call this method directly. Under normal
 * operations it functions as a delegate for {@link #renderQuery(Query, TableNameConverter, boolean)}.</p>

* * @param query The Query instance from which to determine the GROUP BY properties. * @return The database-specific SQL rendering of the GROUP BY portion of the query. */ protected String renderQueryGroupBy(Query query) { StringBuilder sql = new StringBuilder(); String groupClause = query.getGroupClause(); if (groupClause != null) { sql.append(" GROUP BY "); sql.append(processGroupByClause(groupClause)); } return sql.toString(); } private String processGroupByClause(String groupBy) { return SqlUtils.GROUP_BY_CLAUSE.matcher(groupBy).replaceAll(processID("$1")); } /** *

 * <p>Renders the ORDER BY portion of the query in the database-specific SQL
 * dialect. Very few databases deviate from the standard in this matter,
 * thus the default implementation is usually sufficient.</p>
 *
 * <p>An example return value: <code>" ORDER BY name ASC"</code></p>
 *
 * <p>There is usually no need to call this method directly. Under normal
 * operations it functions as a delegate for {@link #renderQuery(Query, TableNameConverter, boolean)}.</p>

* * @param query The Query instance from which to determine the ORDER BY properties. * @return The database-specific SQL rendering of the ORDER BY portion of the query. */ protected String renderQueryOrderBy(Query query) { StringBuilder sql = new StringBuilder(); String orderClause = query.getOrderClause(); if (orderClause != null) { sql.append(" ORDER BY "); sql.append(processOrderClause(orderClause)); } return sql.toString(); } private String processOrderClause(String order) { return SqlUtils.ORDER_CLAUSE.matcher(order).replaceFirst(processID("$1")); } /** *

 * <p>Renders the LIMIT portion of the query in the database-specific SQL
 * dialect. There is wide variety in database implementations of this
 * particular SQL clause. In fact, many databases do not support it at all.
 * If the database in question does not support LIMIT, this method should
 * be overridden to return an empty String. For such databases, LIMIT
 * should be implemented by overriding {@link #setQueryResultSetProperties(ResultSet, Query)}
 * and {@link #setQueryStatementProperties(Statement, Query)}.</p>
 *
 * <p>An example return value: <code>" LIMIT 10,2"</code></p>
 *
 * <p>There is usually no need to call this method directly. Under normal
 * operations it functions as a delegate for {@link #renderQuery(Query, TableNameConverter, boolean)}.</p>
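 *
 * <p>For a database without LIMIT support, an override could look like this sketch
 * (illustrative only), with the limit applied via the statement properties instead:</p>
 *
 * <pre>{@code
 * @Override
 * protected String renderQueryLimit(Query query) {
 *     return "";
 * }
 * }</pre>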

* * @param query The Query instance from which to determine the LIMIT properties. * @return The database-specific SQL rendering of the LIMIT portion of the query. */ protected String renderQueryLimit(Query query) { StringBuilder sql = new StringBuilder(); int limit = query.getLimit(); if (limit >= 0) { sql.append(" LIMIT "); sql.append(limit); } int offset = query.getOffset(); if (offset > 0) { sql.append(" OFFSET ").append(offset); } return sql.toString(); } /** *

 * <p>Retrieves a JDBC {@link Connection} instance which corresponds
 * to the database represented by the provider instance. This Connection
 * can be used to execute arbitrary JDBC operations against the database.
 * Also, this is the method used by the whole of ActiveObjects itself to
 * get database connections when required.</p>
 *
 * <p>All {@link Connection} instances are pooled internally by thread.
 * Thus, there is never more than one connection per thread. This is
 * necessary to allow arbitrary JDBC operations within a transaction
 * without breaking transaction integrity. Developers using this method
 * should bear this fact in mind and consider the {@link Connection}
 * instance immutable. The only exception is if one is absolutely
 * certain that the JDBC code in question is not being executed within
 * a transaction.</p>
 *
 * <p>Despite the fact that there is only a single connection per thread,
 * the {@link Connection} instances returned from this method should
 * still be treated as bona fide JDBC connections. They can and
 * should be closed when their usage is complete. This is
 * especially important when actual connection pooling is in use and
 * non-disposal of connections can lead to a crash as the connection
 * pool runs out of resources. The developer need not concern themselves
 * with the single-connection-per-thread issue when closing the connection,
 * as the call to close() will be intercepted and ignored
 * if necessary.</p>
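 *
 * <p>A short usage sketch (illustrative only); closing the connection is safe even
 * inside a transaction, since the close() call is intercepted when necessary:</p>
 *
 * <pre>{@code
 * Connection conn = provider.getConnection();
 * try {
 *     // arbitrary JDBC operations
 * } finally {
 *     conn.close();
 * }
 * }</pre>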

* * @return A new connection to the database */ public final Connection getConnection() throws SQLException { Connection c = transactionThreadLocal.get(); if (c != null) { if (!c.isClosed()) { return c; } else { transactionThreadLocal.remove(); // remove the reference to the connection } } final Connection connectionImpl = dataSource.getConnection(); if (connectionImpl == null) { throw new SQLException("Unable to create connection"); } c = DelegateConnectionHandler.newInstance(connectionImpl); setPostConnectionProperties(c); return c; } public final Connection startTransaction() throws SQLException { final Connection c = getConnection(); setCloseable(c, false); c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); c.setAutoCommit(false); transactionThreadLocal.set(c); return c; } public final Connection commitTransaction(Connection c) throws SQLException { checkState(c == transactionThreadLocal.get(), "There are two concurrently open transactions!"); checkState(c != null, "Tried to commit a transaction that is not started!"); c.commit(); transactionThreadLocal.remove(); return c; } public final void rollbackTransaction(Connection c) throws SQLException { checkState(c == transactionThreadLocal.get(), "There are two concurrently open transactions!"); checkState(c != null, "Tried to rollback a transaction that is not started!"); c.rollback(); } void setCloseable(Connection connection, boolean closeable) { if (connection != null && connection instanceof DelegateConnection) { ((DelegateConnection) connection).setCloseable(closeable); } } /** * Frees any resources held by the database provider or delegate * libraries (such as connection pools). This method should be * once usage of the provider is complete to ensure that all * connections are committed and closed. */ public void dispose() { dataSource.dispose(); } /** * Called to make any post-creation modifications to a new * {@link Connection} instance. This is used for databases * such as Derby which require the schema to be set after * the connection is created. * * @param conn The connection to modify according to the database * requirements. */ protected void setPostConnectionProperties(Connection conn) throws SQLException { } /** * Renders the foreign key constraints in database-specific DDL for * the table in question. Actually, this method only loops through * the foreign keys and renders indentation and line-breaks. The * actual rendering is done in a second delegate method. * * @param table The database-agnostic DDL representation of the table * in question. * @return The String rendering of all of the foreign keys for * the table. * @see #renderForeignKey(DDLForeignKey) */ protected String renderConstraintsForTable(DDLTable table) { StringBuilder back = new StringBuilder(); for (DDLForeignKey key : table.getForeignKeys()) { back.append(" ").append(renderForeignKey(key)).append(",\n"); } return back.toString(); } /** * Renders the specified foreign key representation into the * database-specific DDL. The implementation must name the * foreign key according to the DDLForeignKey#getFKName() * value otherwise migrations will no longer function appropriately. * * @param key The database-agnostic foreign key representation. * @return The database-pecific DDL fragment corresponding to the * foreign key in question. 
*/ protected String renderForeignKey(DDLForeignKey key) { StringBuilder back = new StringBuilder(); back.append("CONSTRAINT ").append(processID(key.getFKName())); back.append(" FOREIGN KEY (").append(processID(key.getField())).append(") REFERENCES "); back.append(withSchema(key.getTable())).append('(').append(processID(key.getForeignField())).append(")"); return back.toString(); } /** * Converts the specified type into the database-specific DDL String * value. By default, this delegates to the DatabaseType#getDefaultName() * method. Subclass implementations should be sure to make a super * call in order to ensure that both default naming and future special * cases are handled appropriately. * * @param type The type instance to convert to a DDL string. * @return The database-specific DDL representation of the type (e.g. "VARCHAR"). * @see net.java.ao.types.DatabaseType#getDefaultName() */ protected String convertTypeToString(DatabaseType type) { return type.getDefaultName(); } /** * Renders the specified table representation into the corresponding * database-specific DDL statement. For legacy reasons, this only allows * single-statement table creation. Additional statements (triggers, * functions, etc) must be created in one of the other delegate methods * for DDL creation. This method does a great deal of delegation to * other DatabaseProvider methods for functions such as * field rendering, foreign key rendering, etc. * * @param table The database-agnostic table representation. * @return The database-specific DDL statements which correspond to the * specified table creation. */ protected String renderTable(DDLTable table) { StringBuilder back = new StringBuilder("CREATE TABLE "); back.append(withSchema(table.getName())); back.append(" (\n"); List primaryKeys = new LinkedList(); StringBuilder append = new StringBuilder(); for (DDLField field : table.getFields()) { back.append(" ").append(renderField(table, field, new RenderFieldOptions(true, true))).append(",\n"); if (field.isPrimaryKey()) { primaryKeys.add(field.getName()); } } // TODO commented out contraints append.append(renderConstraintsForTable(table)); back.append(append); if (primaryKeys.size() > 1) { throw new RuntimeException("Entities may only have one primary key"); } if (primaryKeys.size() > 0) { back.append(" PRIMARY KEY("); back.append(processID(primaryKeys.get(0))); back.append(")\n"); } back.append(")"); String tailAppend = renderAppend(); if (tailAppend != null) { back.append(' '); back.append(tailAppend); } return back.toString(); } protected String renderInsert(DDLTable ddlTable, DDLValue[] ddlValues) { final StringBuilder columns = new StringBuilder(); final StringBuilder values = new StringBuilder(); for (DDLValue v : ddlValues) { columns.append(processID(v.getField().getName())).append(","); values.append(renderValue(v.getValue())).append(","); } columns.deleteCharAt(columns.length() - 1); values.deleteCharAt(values.length() - 1); return new StringBuilder() .append("INSERT INTO ").append(withSchema(ddlTable.getName())) .append("(").append(columns).append(")") .append(" VALUES (").append(values).append(")") .toString(); } /** * Generates the appropriate database-specific DDL statement to * drop the specified table representation. The default implementation * is merely "DROP TABLE tablename". This is suitable * for every database that I am aware of. Any dependant database * objects (such as triggers, functions, etc) must be rendered in * one of the other delegate methods (such as renderDropTriggers(DDLTable)). 
* * @param table The table representation which is to be dropped. * @return A database-specific DDL statement which drops the specified * table. */ protected String renderDropTable(DDLTable table) { return "DROP TABLE " + withSchema(table.getName()); } /** * Generates the database-specific DDL statements required to drop all * associated functions for the given table representation. The default * implementation is to return an empty array. Some databases (such * as PostgreSQL) require triggers to fire functions, unlike most * databases which allow triggers to function almost like standalone * functions themselves. For such databases, dropping a table means * not only dropping the table and the associated triggers, but also * the functions associated with the triggers themselves. * * * @param triggerNameConverter * @param table The table representation against which all functions which * correspond (directly or indirectly) must be dropped. * @return An array of database-specific DDL statement(s) which drop the * required functions. */ protected String[] renderDropFunctions(TriggerNameConverter triggerNameConverter, DDLTable table) { return new String[0]; } /** * Generates the database-specific DDL statements required to drop all * associated triggers for the given table representation. The default * implementation is to return an empty array. Most databases require * the @OnUpdate function to be implemented using triggers * explicitly (rather than the implicit MySQL syntax). For such * databases, some tables will thus have triggers which are associated * directly with the table. It is these triggers which must be * dropped prior to the dropping of the table itself. For databases * which associate functions with triggers (such as PostgreSQL), these * functions will be dropped using another delegate method and need * not be dealt with in this method's implementation. * * * * @param triggerNameConverter * @param table The table representation against which all triggers which * correspond (directly or indirectly) must be dropped. * @return An array of database-specific DDL statement(s) which drop the * required triggers. */ protected List renderDropTriggers(TriggerNameConverter triggerNameConverter, DDLTable table) { return ImmutableList.of(); } /** * Generates the database-specific DDL statements required to drop all * associated sequences for the given table representation. The default * implementation is to return an empty array. This is an Oracle specific * method used for primary key management * * @param sequenceNameConverter * @param table The table representation against which all triggers which * correspond (directly or indirectly) must be dropped. * @return An array of database-specific DDL statement(s) which drop the * required triggers. */ protected List renderDropSequences(SequenceNameConverter sequenceNameConverter, DDLTable table) { return ImmutableList.of(); } /** *

 * <p>Generates the database-specific DDL statements required to create
 * all of the functions necessary for the given table. For most
 * databases, this will simply return an empty array. The
 * functionality is required for databases such as PostgreSQL which
 * require a function to be explicitly declared and associated when
 * a trigger is created.</p>
 *
 * <p>Most of the work for this functionality is delegated to the
 * {@link #renderFunctionForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField)} method.</p>

* * * * @param triggerNameConverter * @param table The table for which the functions must be generated. * @return An array of DDL statements to execute. */ protected List renderFunctions(final TriggerNameConverter triggerNameConverter, final DDLTable table) { return renderFields(table, Predicates.alwaysTrue(), new Function() { @Override public String apply(DDLField field) { return renderFunctionForField(triggerNameConverter, table, field); } }); } protected final List renderFields(DDLTable table, Predicate filter, Function render) { final Iterable fields = Lists.newArrayList(table.getFields()); return ImmutableList.copyOf( Iterables.filter( Iterables.transform( Iterables.filter(fields, filter), render), Predicates.notNull())); } /** *

 * <p>Generates the database-specific DDL statements required to create
 * all of the triggers necessary for the given table. For MySQL, this
 * will likely return an empty array. The functionality is required
 * for databases which do not provide an implicit syntax for the
 * @OnUpdate functionality. In MySQL, it is possible to
 * provide this functionality with the
 * <code>field TIMESTAMP ON UPDATE CURRENT_DATE</code> style syntax.
 * This syntax is not common to all databases, hence triggers must be
 * used to provide the functionality.</p>
 *
 * <p>Most of the work for this functionality is delegated to the
 * {@link #renderTriggerForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField)} method.</p>

* * * * @param triggerNameConverter * @param sequenceNameConverter *@param table The table for which the triggers must be generated. @return An array of DDL statements to execute. */ protected List renderTriggers(final TriggerNameConverter triggerNameConverter, final SequenceNameConverter sequenceNameConverter, final DDLTable table) { return renderFields( table, Predicates.alwaysTrue(), new Function() { @Override public String apply(DDLField field) { return renderTriggerForField(triggerNameConverter, sequenceNameConverter, table, field); } }); } /** *

 * Generates the database-specific DDL statements required to create
 * all of the sequences necessary for the given table.

* * @param sequenceNameConverter the naming strategy for sequences * @param table The table for which the triggers must be generated. * @return DDL statements to execute. */ protected List renderSequences(SequenceNameConverter sequenceNameConverter, DDLTable table) { return ImmutableList.of(); } /** * Generates the database-specific DDL statements required to add * a column to an existing table. Included in the return value * should be the statements required to add all necessary functions * and triggers to ensure that the column acts appropriately. For * example, if the field is tagged with an @OnUpdate * annotation, chances are there will be a trigger and possibly a * function along with the ALTER statement. These "extra" * functions are properly ordered and will only be appended if * their values are not null. Because of this, very * few database providers will need to override this method. * * * * @param triggerNameConverter * @param sequenceNameConverter *@param table The table which should receive the new column. * @param field The column to add to the specified table. @return An array of DDL statements to execute. * @see #renderFunctionForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #renderTriggerForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) */ protected String[] renderAlterTableAddColumn(TriggerNameConverter triggerNameConverter, SequenceNameConverter sequenceNameConverter, DDLTable table, DDLField field) { List back = new ArrayList(); back.add("ALTER TABLE " + withSchema(table.getName()) + " ADD COLUMN " + renderField(table, field, new RenderFieldOptions(true, true))); String function = renderFunctionForField(triggerNameConverter, table, field); if (function != null) { back.add(function); } final String trigger = renderTriggerForField(triggerNameConverter, sequenceNameConverter, table, field); if (trigger != null) { back.add(trigger); } return back.toArray(new String[back.size()]); } /** *

 * <p>Generates the database-specific DDL statements required to change
 * the given column from its old specification to the given DDL value.
 * This method will also generate the appropriate statements to remove
 * old triggers and functions, as well as add new ones according to the
 * requirements of the new field definition.</p>
 *
 * <p>The default implementation of this method functions in the manner
 * specified by the MySQL database. Some databases will have to perform
 * more complicated actions (such as dropping and re-adding the field)
 * in order to satisfy the same use-case. Such databases should print
 * a warning to stderr to ensure that the end-developer is aware of
 * such restrictions.</p>
 *
 * <p>Thus, the specification for this method allows for data
 * loss. Nevertheless, if the database supplies a mechanism to
 * accomplish the task without data loss, it should be applied.</p>
 *
 * <p>For maximum flexibility, the default implementation of this method
 * only deals with the dropping and addition of functions and triggers.
 * The actual generation of the ALTER TABLE statement is done in the
 * {@link #renderAlterTableChangeColumnStatement(net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField, net.java.ao.schema.ddl.DDLField, net.java.ao.DatabaseProvider.RenderFieldOptions)}
 * method.</p>

* * * * * @param triggerNameConverter * @param sequenceNameConverter *@param table The table containing the column to change. * @param oldField The old column definition. * @param field The new column definition (defining the resultant DDL). @return An array of DDL statements to be executed. * @see #getTriggerNameForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #getFunctionNameForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #renderFunctionForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #renderTriggerForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) */ protected List renderAlterTableChangeColumn(TriggerNameConverter triggerNameConverter, SequenceNameConverter sequenceNameConverter, DDLTable table, DDLField oldField, DDLField field) { final List back = new ArrayList(); final String trigger = getTriggerNameForField(triggerNameConverter, table, oldField); if (trigger != null) { back.add(new StringBuilder().append("DROP TRIGGER ").append(processID(trigger)).toString()); } final String function = getFunctionNameForField(triggerNameConverter, table, oldField); if (function != null) { back.add(new StringBuilder().append("DROP FUNCTION ").append(processID(function)).toString()); } back.add(renderAlterTableChangeColumnStatement(table, oldField, field, renderFieldOptionsInAlterColumn())); final String toRenderFunction = renderFunctionForField(triggerNameConverter, table, field); if (toRenderFunction != null) { back.add(toRenderFunction); } final String toRenderTrigger = renderTriggerForField(triggerNameConverter, sequenceNameConverter, table, field); if (toRenderTrigger != null) { back.add(toRenderTrigger); } return back; } protected RenderFieldOptions renderFieldOptionsInAlterColumn() { return new RenderFieldOptions(true, true, true); } /** * Generates the database-specific DDL statement only for altering a table and * changing a column. This method must only generate a single statement as it * does not need to concern itself with functions or triggers associated with * the column. This method is only to be called as a delegate for the * {@link #renderAlterTableChangeColumn(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField, net.java.ao.schema.ddl.DDLField)} method, * for which it is a primary delegate. The default implementation of this * method functions according to the MySQL specification. * * @param table The table containing the column to change. * @param oldField The old column definition. * @param field The new column definition (defining the resultant DDL). * @param options * @return A single DDL statement which is to be executed. 
* @see #renderField(net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField, DatabaseProvider.RenderFieldOptions) */ protected String renderAlterTableChangeColumnStatement(DDLTable table, DDLField oldField, DDLField field, RenderFieldOptions options) { StringBuilder current = new StringBuilder(); current.append("ALTER TABLE ").append(withSchema(table.getName())).append(" CHANGE COLUMN "); current.append(processID(oldField.getName())).append(' '); current.append(renderField(table, field, options)); return current.toString(); } /** * Generates the database-specific DDL statements required to remove * the specified column from the given table. This should also * generate the necessary statements to drop all triggers and functions * associated with the column in question. If the database being * implemented has a non-standard syntax for dropping functions and/or * triggers, it may be required to override this method, even if the * syntax to drop columns is standard. * * * @param triggerNameConverter * @param table The table from which to drop the column. * @param field The column definition to remove from the table. * @return An array of DDL statements to be executed. * @see #getTriggerNameForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) * @see #getFunctionNameForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) */ protected String[] renderAlterTableDropColumn(TriggerNameConverter triggerNameConverter, DDLTable table, DDLField field) { List back = new ArrayList(); StringBuilder current = new StringBuilder(); String trigger = getTriggerNameForField(triggerNameConverter, table, field); if (trigger != null) { current.setLength(0); current.append("DROP TRIGGER ").append(processID(trigger)); back.add(current.toString()); } String function = getFunctionNameForField(triggerNameConverter, table, field); if (function != null) { current.setLength(0); current.append("DROP FUNCTION ").append(processID(function)); back.add(current.toString()); } current.setLength(0); current.append("ALTER TABLE ").append(withSchema(table.getName())).append( " DROP COLUMN ").append(processID(field.getName())); back.add(current.toString()); return back.toArray(new String[back.size()]); } /** * Generates the database-specific DDL statement required to add a * foreign key to a table. For databases which do not support such * a statement, a warning should be printed to stderr and a * null value returned. * * @param key The foreign key to be added. As this instance contains * all necessary data (such as domestic table, field, etc), no * additional parameters are required. * @return A DDL statement to be executed, or null. * @see #renderForeignKey(DDLForeignKey) */ protected String renderAlterTableAddKey(DDLForeignKey key) { StringBuilder back = new StringBuilder(); back.append("ALTER TABLE ").append(withSchema(key.getDomesticTable())).append(" ADD "); back.append(renderForeignKey(key)); return back.toString(); } /** * Generates the database-specific DDL statement required to remove a * foreign key from a table. For databases which do not support such * a statement, a warning should be printed to stderr and a * null value returned. This method assumes that the * {@link #renderForeignKey(DDLForeignKey)} method properly names * the foreign key according to the {@link DDLForeignKey#getFKName()} * method. * * @param key The foreign key to be removed. 
As this instance contains * all necessary data (such as domestic table, field, etc), no * additional parameters are required. * @return A DDL statement to be executed, or null. */ protected String renderAlterTableDropKey(DDLForeignKey key) { return "ALTER TABLE " + withSchema(key.getDomesticTable()) + " DROP FOREIGN KEY " + processID(key.getFKName()); } /** * Generates the database-specific DDL statement required to create * a new index. The syntax for this operation is highly standardized * and thus it is unlikely this method will be overridden. If the * database in question does not support indexes, a warning should * be printed to stderr and null returned. * * * * @param indexNameConverter * @param index The index to create. This single instance contains all * of the data necessary to create the index, thus no separate * parameters (such as a DDLTable) are required. * @return A DDL statement to be executed, or null. */ protected String renderCreateIndex(IndexNameConverter indexNameConverter, DDLIndex index) { StringBuilder back = new StringBuilder(); back.append("CREATE INDEX ").append(processID(indexNameConverter.getName(index.getTable(), index.getField()))); back.append(" ON ").append(withSchema(index.getTable())).append('(').append(processID(index.getField())).append(')'); return back.toString(); } /** * Generates the database-specific DDL statement required to drop * an index. The syntax for this operation is highly standardized * and thus it is unlikely this method will be overridden. If the * database in question does not support indexes, a warning should * be printed to stderr and null returned. * * * * @param indexNameConverter * @param index The index to drop. This single instance contains all * of the data necessary to drop the index, thus no separate * parameters (such as a DDLTable) are required. * @return A DDL statement to be executed, or null. */ protected String renderDropIndex(IndexNameConverter indexNameConverter, DDLIndex index) { StringBuilder back = new StringBuilder(); back.append("DROP INDEX ").append(processID(indexNameConverter.getName(index.getTable(), index.getField()))); back.append(" ON ").append(withSchema(index.getTable())); return back.toString(); } /** *

 * <p>Generates any database-specific options which must be appended
 * to the end of a table definition. The only database I am aware
 * of which requires this is MySQL. For example:</p>
 *
 * <pre>CREATE TABLE test (
 *     id INTEGER NOT NULL AUTO_INCREMENT,
 *     name VARCHAR(45),
 *     PRIMARY KEY(id)
 * ) ENGINE=InnoDB;</pre>
 *
 * <p>The "ENGINE=InnoDB" clause is what is returned by
 * this method. The default implementation simply returns
 * null, signifying that no append should be rendered.</p>
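 *
 * <p>A sketch of an override for such a database (illustrative only):</p>
 *
 * <pre>{@code
 * @Override
 * protected String renderAppend() {
 *     return "ENGINE=InnoDB";
 * }
 * }</pre>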

* * @return A DDL clause to be appended to the CREATE TABLE DDL, or null */ protected String renderAppend() { return null; } /** *

 * <p>Generates the database-specific DDL fragment required to render the
 * field and its associated type. This includes all field attributes,
 * such as @NotNull, @AutoIncrement (if
 * supported by the database at the field level) and so on. Sample
 * return value:</p>
 *
 * <pre>name VARCHAR(255) DEFAULT "Skye" NOT NULL</pre>
 *
 * <p>Certain databases don't allow defined precision for certain types
 * (such as Derby and INTEGER). The logic for whether or not to render
 * precision should not be within this method, but delegated to the
 * {@link #considerPrecision(DDLField)} method.</p>
 *
 * <p>Almost all functionality within this method is delegated to other
 * methods within the implementation. As such, it is almost never
 * necessary to override this method directly. An exception to this
 * would be a database like PostgreSQL which requires a different type
 * for auto-incremented fields.</p>

* * @param table * @param field The field to be rendered. * @param options * @return A DDL fragment to be embedded in a statement elsewhere. */ protected String renderField(DDLTable table, DDLField field, RenderFieldOptions options) { StringBuilder back = new StringBuilder(); back.append(processID(field.getName())); back.append(" "); back.append(renderFieldType(field)); back.append(renderFieldPrecision(field)); if (field.isAutoIncrement()) { String autoIncrementValue = renderAutoIncrement(); if (!autoIncrementValue.trim().equals("")) { back.append(' ').append(autoIncrementValue); } } else if ((options.forceNull && !field.isNotNull() && !field.isUnique() && !field.isPrimaryKey()) || (options.renderDefault && field.getDefaultValue() != null)) { back.append(renderFieldDefault(table, field)); } if (options.renderUnique && field.isUnique()) { final String renderUniqueString = renderUnique(); if (!renderUniqueString.trim().equals("")) { back.append(' ').append(renderUniqueString); } } if (field.isNotNull() || field.isUnique()) { back.append(" NOT NULL"); } if (field.getOnUpdate() != null) { back.append(renderOnUpdate(field)); } return back.toString(); } protected String renderFieldDefault(DDLTable table, DDLField field) { return new StringBuilder().append(" DEFAULT ").append(renderValue(field.getDefaultValue())).toString(); } /** *

 * <p>Renders the statement fragment for the given field representative of
 * its precision only. Consider the following statement:</p>
 *
 * <pre>ALTER TABLE ADD COLUMN name VARCHAR(255)</pre>
 *
 * <p>In this statement, the bit which is rendered by this method is the
 * "(255)" (without quotes). This is intended to allow
 * maximum flexibility in field type rendering (as required by PostgreSQL
 * and others which sometimes render types separately from the rest of
 * the field info). The default implementation should suffice for every
 * conceivable database. Any sort of odd functionality relating to
 * type precision rendering should be handled in the {@link #considerPrecision(DDLField)}
 * method if possible.</p>
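 *
 * <p>For illustration only (not from the original documentation), the default
 * implementation renders:</p>
 *
 * <pre>{@code
 * // precision 255, no scale  ->  "(255)"
 * // precision 10, scale 2    ->  "(10,2)"
 * // precision 0              ->  ""
 * }</pre>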

* * @param field The field for which the precision must be rendered. * @return A DDL fragment which will be concatenated into a statement later. */ protected String renderFieldPrecision(DDLField field) { StringBuilder back = new StringBuilder(); if (considerPrecision(field) && field.getPrecision() > 0) { back.append('('); if (field.getScale() > 0) { back.append(field.getPrecision()); back.append(','); back.append(field.getScale()); } else { back.append(field.getPrecision()); } back.append(')'); } return back.toString(); } /** * Renders the given Java instance in a database-specific way. This * method handles special cases such as {@link Calendar}, * {@link Boolean} (which is always rendered as 0/1), functions, * null and numbers. All other values are rendered (by * default) as 'value.toString()' (the String value * enclosed within single quotes). Implementations are encouraged to * override this method as necessary. * * @param value The Java instance to be rendered as a database literal. * @return The database-specific String rendering of the instance in * question. * @see #renderCalendar(Calendar) * @see #renderFunction(DatabaseFunction) */ protected String renderValue(Object value) { if (value == null) { return "NULL"; } else if (value instanceof Calendar) { return "'" + renderCalendar((Calendar) value) + "'"; } else if (value instanceof Boolean) { return ((Boolean) value ? "1" : "0"); } else if (value instanceof DatabaseFunction) { return renderFunction((DatabaseFunction) value); } else if (value instanceof Number) { return value.toString(); } return "'" + value.toString() + "'"; } /** * Renders the provided {@link Calendar} instance as a TIMESTAMP literal * in the database-specific format. The return value should not * be enclosed within quotes, as this is accomplished within other * functions when rendering is required. This method is actually a * boiler-plate usage of the {@link SimpleDateFormat} class, using the * date format defined within the {@link #getDateFormat()} method. * * @param calendar The time instance to be rendered. * @return The database-specific String representation of the time. */ protected String renderCalendar(Calendar calendar) { return new SimpleDateFormat(getDateFormat()).format(calendar.getTime()); } /** * Renders the UNIQUE constraint as defined by the * database-specific DDL syntax. This method is a delegate of other, more * complex methods such as {@link #renderField(net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField, DatabaseProvider.RenderFieldOptions)}. The default * implementation just returns UNIQUE. Implementations may * override this method to return an empty {@link String} if the database * in question does not support the constraint. * * @return The database-specific rendering of UNIQUE. */ protected String renderUnique() { return "UNIQUE"; } /** * Returns the database-specific TIMESTAMP text format as defined by * the {@link SimpleDateFormat} syntax. This format should include * the time down to the second (or even more precise, if allowed by * the database). The default implementation returns the format for * MySQL, which is: yyyy-MM-dd HH:mm:ss * * @return The database-specific TIMESTAMP text format */ protected String getDateFormat() { return "yyyy-MM-dd HH:mm:ss"; } /** * Renders the database-specific DDL type for the field in question. * This method merely delegates to the {@link #convertTypeToString(DatabaseType)} * method, passing the field type. Thus, it is rarely necessary * (if ever) to override this method. 
It may be deprecated in a * future release. * * @param field The field which contains the type to be rendered. * @return The database-specific type DDL rendering. */ protected String renderFieldType(DDLField field) { return convertTypeToString(field.getType()); } /** *

Renders the specified {@link DatabaseFunction} in its * database-specific form. For example, for MySQL the * CURRENT_DATE enum value would be rendered as * "CURRENT_DATE" (without the quotes). For functions * which do not have a database equivalent, a default literal value * of the appropriate type should be returned. For example, if MySQL * did not define either a CURRENT_DATE or a CURRENT_TIMESTAMP * function, the appropriate return value for both functions would * be '0000-00-00 00:00:00' (including the quotes). * This is to prevent migrations from failing even in cases where * non-standard functions are used.
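*
* As a hedged illustration (the rendering below is hypothetical and does not
* describe any shipped provider), a database which spells the current-timestamp
* function differently might override this method as follows:
*
*     @Override
*     protected String renderFunction(DatabaseFunction func) {
*         switch (func) {
*             case CURRENT_TIMESTAMP:
*                 return "SYSDATE";               // database-specific spelling
*         }
*         return super.renderFunction(func);      // everything else uses the defaults
*     }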

*

*

As of 1.0, no unconventional functions are allowed by the * {@link DatabaseFunction} enum, thus no database should have any * problems with any allowed functions.

* * @param func The abstract function to be rendered. * @return The database-specific DDL representation of the function * in question. */ protected String renderFunction(DatabaseFunction func) { switch (func) { case CURRENT_DATE: return "CURRENT_DATE"; case CURRENT_TIMESTAMP: return "CURRENT_TIMESTAMP"; } return null; } /** *

Renders the appropriate field suffix to allow for the * {@link OnUpdate} functionality. For most databases (read: * all but MySQL) this will return an empty String. This is * because few databases provide an implicit ON UPDATE syntax for * fields. As such, most databases will be compelled to return * an empty String and implement the functionality using triggers. * * @param field The field for which the ON UPDATE clause should * be rendered. * @return The database-specific ON UPDATE field clause. */ protected String renderOnUpdate(DDLField field) { StringBuilder back = new StringBuilder(); back.append(" ON UPDATE "); back.append(renderValue(field.getOnUpdate())); return back.toString(); } public Object handleBlob(ResultSet res, Class type, String field) throws SQLException { final Blob blob = res.getBlob(field); if (type.equals(InputStream.class)) { return blob.getBinaryStream(); } else if (type.equals(byte[].class)) { return blob.getBytes(1, (int) blob.length()); } else { return null; } } /** *

Determines whether or not the database allows an explicit precision * for the field in question. This is to support databases such as * Derby, which do not accept a precision for certain types. By * default, this method returns true.

*

*

More often than not, all that is required for this determination * is the type. As such, the method signature may change in a future * release.
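*
* A minimal sketch of an override (hypothetical, for a database which also
* rejects precisions on large object columns) would simply add cases to the
* default switch:
*
*     @Override
*     protected boolean considerPrecision(DDLField field) {
*         switch (field.getType().getType()) {
*             case Types.BLOB:
*             case Types.CLOB:
*                 return false;
*         }
*         return super.considerPrecision(field);
*     }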

* * @param field The field for which precision should/shouldn't be rendered. * @return true if precision should be rendered, otherwise * false. */ protected boolean considerPrecision(DDLField field) { switch (field.getType().getType()) { case Types.TIMESTAMP: return false; } return true; } /** * Retrieves the name of the trigger which corresponds to the field * in question (if any). If no trigger will be automatically created * for the specified field, null should be returned. * This function is to allow for databases which require the use of * triggers on a field to allow for certain functionality (like * ON UPDATE). The default implementation returns null. * * * @param triggerNameConverter * @param table The table which contains the field for which a trigger * may or may not exist. * @param field The field for which a previous migration may have * created a trigger. * @return The unique name of the trigger which was created for the * field, or null if none. * @see #renderTriggerForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.SequenceNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) */ protected String getTriggerNameForField(TriggerNameConverter triggerNameConverter, DDLTable table, DDLField field) { return null; } /** * Renders the trigger which corresponds to the specified field, or * null if none. This is to allow for databases which * require the use of triggers to provide functionality such as ON * UPDATE. The default implementation returns null. * * * * @param triggerNameConverter * @param sequenceNameConverter *@param table The table containing the field for which a trigger * may need to be rendered. * @param field The field for which the trigger should be rendered, * if any. @return A database-specific DDL statement creating a trigger for * the field in question, or null. * @see #getTriggerNameForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) */ protected String renderTriggerForField(TriggerNameConverter triggerNameConverter, SequenceNameConverter sequenceNameConverter, DDLTable table, DDLField field) { return null; } /** * Retrieves the name of the function which corresponds to the field * in question (if any). If no function will be automatically created * for the specified field, null should be returned. * This method is to allow for databases which require the use of * explicitly created functions which correspond to triggers (e.g. * PostgreSQL). Few providers will need to override the default * implementation of this method, which returns null. * * * @param triggerNameConverter * @param table The table which contains the field for which a function * may or may not exist. * @param field The field for which a previous migration may have * created a function. * @return The unique name of the function which was created for the * field, or null if none. */ protected String getFunctionNameForField(TriggerNameConverter triggerNameConverter, DDLTable table, DDLField field) { final String triggerName = getTriggerNameForField(triggerNameConverter, table, field); return triggerName != null ? triggerName + "()" : null; } /** * Renders the function which corresponds to the specified field, or * null if none. This is to allow for databases which * require the use of triggers and explicitly created functions to * provide functionality such as ON UPDATE (e.g. PostgreSQL). The * default implementation returns null. 
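*
* The statement below is only a rough sketch of the kind of DDL a
* PostgreSQL-style provider might emit; it is illustrative and does not
* reproduce the real provider's output:
*
*     @Override
*     protected String renderFunctionForField(TriggerNameConverter triggerNameConverter,
*                                             DDLTable table, DDLField field) {
*         String name = getFunctionNameForField(triggerNameConverter, table, field);
*         if (name == null || field.getOnUpdate() == null) {
*             return null;
*         }
*         return "CREATE FUNCTION " + name + " RETURNS trigger AS $$ BEGIN NEW."
*             + field.getName() + " := " + renderValue(field.getOnUpdate())
*             + "; RETURN NEW; END; $$ LANGUAGE plpgsql";
*     }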
* * * * @param triggerNameConverter * @param table The table containing the field for which a function * may need to be rendered. * @param field The field for which the function should be rendered, * if any. * @return A database-specific DDL statement creating a function for * the field in question, or null. * @see #getFunctionNameForField(net.java.ao.schema.TriggerNameConverter, net.java.ao.schema.ddl.DDLTable, net.java.ao.schema.ddl.DDLField) */ protected String renderFunctionForField(TriggerNameConverter triggerNameConverter, DDLTable table, DDLField field) { return null; } /** *

Generates an INSERT statement to be used to create a new row in the * database, returning the primary key value. This method also invokes * the delegate method, {@link #executeInsertReturningKey(EntityManager, java.sql.Connection, Class, String, String, DBParam...)} * passing the appropriate parameters and query. This method is required * because some databases do not support the JDBC parameter * RETURN_GENERATED_KEYS (such as HSQLDB and PostgreSQL). * Also, some databases (such as MS SQL Server) require odd tricks to * support explicit value passing to auto-generated fields. This method * should take care of any extra queries or odd SQL generation required * to implement both auto-generated primary key returning, as well as * explicit primary key value definition.

*

*

Overriding implementations of this method should be sure to use the * {@link Connection} instance passed to the method, not a new * instance generated using the {@link #getConnection()} method. This is * because this method is in fact a delegate invoked by {@link EntityManager} * as part of the entity creation process and may be part of a transaction, * a bulk creation or some more complicated operation. Both optimization * and usage patterns on the API dictate that the specified connection * instance be used. Implementations may assume that the given connection * instance is never null.

*

*

The default implementation of this method should be sufficient for any * fully compliant ANSI SQL database with a properly implemented JDBC * driver. Note that this method should not actually execute * the SQL it generates, but pass it on to the {@link #executeInsertReturningKey(EntityManager, java.sql.Connection, Class, String, String, DBParam...)} * method, allowing for functional delegation and better extensibility. * However, this method may execute any additional statements required to * prepare for the INSERT (as in the case of MS SQL Server, which requires * some config parameters to be set on the database itself prior to the INSERT).
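*
* To make the generated shape concrete (the table and column names here are
* invented for the example), the default implementation builds statements such as:
*
*     INSERT INTO person (name,age) VALUES (?,?)      -- explicit parameters supplied
*     INSERT INTO person (id) VALUES (DEFAULT)        -- no parameters supplied at all
*
* where the table name has already been run through {@link #withSchema(String)}
* and each column name through {@link #processID(String)}.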

* * @param manager The EntityManager which was used to dispatch * the INSERT in question. * @param conn The connection to be used in the eventual execution of the * generated SQL statement. * @param pkType The Java type of the primary key value. Can be used to * perform a linear search for a specified primary key value in the * params list. The return value of the method must be of * the same type. * @param pkField The database field which is the primary key for the * table in question. Can be used to perform a linear search for a * specified primary key value in the params list. * @param pkIdentity Flag indicating whether or not the primary key field * is auto-incremented by the database (IDENTITY field). * @param table The name of the table into which the row is to be INSERTed. * @param params A varargs array of parameters to be passed to the * INSERT statement. This may include a specified value for the * primary key. * @throws SQLException If the INSERT fails in the delegate method, or * if any additional statements fail with an exception. * @see #executeInsertReturningKey(EntityManager, java.sql.Connection, Class, String, String, DBParam...) */ @SuppressWarnings("unused") public T insertReturningKey(EntityManager manager, Connection conn, Class pkType, String pkField, boolean pkIdentity, String table, DBParam... params) throws SQLException { final StringBuilder sql = new StringBuilder("INSERT INTO " + withSchema(table) + " ("); for (DBParam param : params) { sql.append(processID(param.getField())); sql.append(','); } if (params.length > 0) { sql.setLength(sql.length() - 1); } else { sql.append(processID(pkField)); } sql.append(") VALUES ("); for (DBParam param : params) { sql.append("?,"); } if (params.length > 0) { sql.setLength(sql.length() - 1); } else { sql.append("DEFAULT"); } sql.append(")"); return executeInsertReturningKey(manager, conn, pkType, pkField, sql.toString(), params); } /** *

Delegate method to execute an INSERT statement returning any auto-generated * primary key values. This method is primarily designed to be called as a delegate * from the {@link #insertReturningKey(EntityManager, Connection, Class, String, boolean, String, DBParam...)} * method. The idea behind this method is to allow custom implementations to * override it and potentially execute other statements (such as getting the * next value in a sequence) rather than the default implementation, which uses the * JDBC constant, RETURN_GENERATED_KEYS. Any database which has a * fully-implemented JDBC driver should have no problems with the default * implementation of this method.

*

*

Part of the design behind splitting insertReturningKey and * executeInsertReturningKey is so that logic for generating the actual * INSERT statement need not be duplicated throughout the code and in custom * implementations providing trivial changes to the default algorithm. This method * should avoid actually generating SQL if at all possible.

*

*

This method should iterate through the passed DBParam(s) to * determine whether a primary key value was explicitly specified. If one was, it * should be used in lieu of one which is auto-generated by the database. Also, * it is this value which should be returned if specified, rather than the value * which would have been generated or null. As such, this method * should always return exactly the value of the primary key field in the row which * was just inserted, regardless of what that value may be.
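*
* Concretely (the column names and values below are invented for the example,
* and the two-argument DBParam constructor is assumed), a caller who supplies
* the key explicitly must get that same value back:
*
*     DBParam[] params = { new DBParam("id", 42), new DBParam("name", "Daniel") };
*     // the parameter scan finds "id" matching pkField and returns 42,
*     // regardless of anything the database may have auto-generated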

*

*

In cases where the database mechanism for getting the next primary key value * is not thread safe, this method should be declared synchronized, * or some other thread synchronization technique employed. Unfortunately, it is not * always possible to ensure that no other INSERT could (potentially) "steal" the * expected value out from under the algorithm. Such scenarios are to be avoided * when possible, but the algorithm need not take extremely esoteric concurrency * cases into account. (See the HSQLDB provider for an example of such a * less-than-thorough algorithm.)
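*
* A minimal sketch of such an override (it only adds the synchronization; a
* real provider would typically also fetch the key from a sequence or similar
* mechanism):
*
*     @Override
*     protected synchronized <T> T executeInsertReturningKey(EntityManager manager, Connection conn,
*             Class<T> pkType, String pkField, String sql, DBParam... params) throws SQLException {
*         // serialise key generation for drivers whose key retrieval is not thread safe
*         return super.executeInsertReturningKey(manager, conn, pkType, pkField, sql, params);
*     }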

*

*

IMPORTANT: The INSERT {@link Statement} must use the specified * connection, rather than a new one retrieved from {@link #getConnection()} or * equivalent. This is because the INSERT may be part of a bulk insertion, a * transaction, or possibly another such operation. It is also important to note * that this method should not close the connection. Doing so could cause the * entity creation algorithm to fail at a higher level up the stack.

* * @param manager The EntityManager which was used to dispatch * the INSERT in question. * @param conn The database connection to use in executing the INSERT statement. * @param pkType The Java class type of the primary key field (for use both in * searching the params as well as performing value conversion * of auto-generated DB values into proper Java instances). * @param pkField The database field which is the primary key for the * table in question. Can be used to perform a linear search for a * specified primary key value in the params list. * @param params A varargs array of parameters to be passed to the * INSERT statement. This may include a specified value for the * primary key. @throws SQLException If the INSERT fails in the delegate method, or * if any additional statements fail with an exception. * @see #insertReturningKey(EntityManager, Connection, Class, String, boolean, String, DBParam...) */ protected T executeInsertReturningKey(EntityManager manager, Connection conn, Class pkType, String pkField, String sql, DBParam... params) throws SQLException { T back = null; final PreparedStatement stmt = preparedStatement(conn, sql, Statement.RETURN_GENERATED_KEYS); for (int i = 0; i < params.length; i++) { Object value = params[i].getValue(); if (value instanceof RawEntity) { value = Common.getPrimaryKeyValue((RawEntity) value); } if (params[i].getField().equalsIgnoreCase(pkField)) { back = (T) value; } if (value == null) { putNull(stmt, i + 1); } else { DatabaseType type = (DatabaseType) TypeManager.getInstance().getType(value.getClass()); type.putToDatabase(manager, stmt, i + 1, value); } } stmt.executeUpdate(); if (back == null) { ResultSet res = stmt.getGeneratedKeys(); if (res.next()) { back = TypeManager.getInstance().getType(pkType).pullFromDatabase(null, res, pkType, 1); } res.close(); } stmt.close(); return back; } /** * Stores an SQL NULL value in the database. This method * is required due to the fact that not all JDBC drivers handle NULLs * in the same fashion. The default implementation calls {@link PreparedStatement#setString(int, String)}, * passing null as a value. Databases which require a * different implementation (e.g. PostgreSQL) should override this method. * * @param stmt The statement in which to store the NULL value. * @param index The index of the parameter which should be assigned NULL. */ public void putNull(PreparedStatement stmt, int index) throws SQLException { stmt.setString(index, null); } /** * Stors an SQL BOOLEAN value in the database. Most databases * handle differences in BOOLEAN semantics within their JDBC * driver(s). However, some do not implement the {@link PreparedStatement#setBoolean(int, boolean)} * method correctly. To work around this defect, any database providers * for such databases should override this method to store boolean values in * the relevant fashion. * * @param stmt The statement in which to store the BOOLEAN value. * @param index The index of the parameter which should be assigned. * @param value The value to be stored in the relevant field. */ public void putBoolean(PreparedStatement stmt, int index, boolean value) throws SQLException { stmt.setBoolean(index, value); } /** * Simple helper function used to determine of the specified JDBC * type is representitive of a numeric type. The definition of * numeric type in this case may be assumed to be any type which * has a corresponding (or coercibly corresponding) Java class * which is a subclass of {@link Number}. 
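* For example:
*
*     isNumericType(Types.INTEGER);   // true
*     isNumericType(Types.DECIMAL);   // true
*     isNumericType(Types.VARCHAR);   // false
*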
The default implementation * should be suitable for every conceivable use-case. * * @param type The JDBC type which is to be tested. * @return true if the specified type represents a numeric * type, otherwise false. */ protected boolean isNumericType(int type) { switch (type) { case Types.BIGINT: return true; case Types.BIT: return true; case Types.DECIMAL: return true; case Types.DOUBLE: return true; case Types.FLOAT: return true; case Types.INTEGER: return true; case Types.NUMERIC: return true; case Types.REAL: return true; case Types.SMALLINT: return true; case Types.TINYINT: return true; } return false; } protected String processOnClause(String on) { return SqlUtils.ON_CLAUSE.matcher(on).replaceAll("$1" + processID("$2") + " = " + "$3" + processID("$4")); } /** * Processes the where clause to make it compatible with the given database. By default it simply escapes IDs that * need to be. * * @param where the raw where clause, * @return the processed where clause compatible with the database in use. * @see #processID(String) */ public final String processWhereClause(String where) { return SqlUtils.processWhereClause(where, new Function() { @Override public String apply(String id) { return processID(id); } }); } /** *

Performs any database-specific post-processing on the specified * identifier. This usually means quoting reserved words, though it * could potentially encompass other tasks. This method is called * extremely frequently and thus must return very quickly.

*

*

The default implementation checks two factors: maximum identifier * length and whether or not the identifier represents a reserved word in the * underlying database. If the identifier exceeds the maximum ID * length for the database in question, the excess text is replaced with a * hash derived from the whole identifier, concatenated onto * the leading remainder. The {@link #shouldQuoteID(String)} method * is used to determine whether or not the identifier in question * should be quoted. For most databases, this involves checking a set * of reserved words, but the method is flexible enough to allow more * complex "reservation rules" (such as those required by Oracle). * If the identifier is reserved in any way, the database-specific * quoting string will be retrieved from {@link DatabaseMetaData} and * used to enclose the identifier. This method cannot simply quote all * identifiers by default because some databases (such as * HSQLDB and PostgreSQL) attach extra significance to quoted fields.

*

*

The general assurance of this method is that for any input identifier, * it will return a correspondingly unique identifier which is * guaranteed to be valid within the underlying database.
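*
* A short, hedged illustration (the identifiers are examples only; which words
* are reserved, and the quote character used, depend on the concrete provider
* and its {@link DatabaseMetaData}):
*
*     processID("name");     // short and unreserved: returned unchanged
*     processID("select");   // reserved for most providers: returned wrapped in
*                            // the driver's quote string, e.g. "select"
*     // identifiers longer than getMaxIDLength() are first shortened via shorten(String)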

* * @param id The identifier to process. * @return A unique identifier corresponding with the input which is * guaranteed to function within the underlying database. * @see #getMaxIDLength() * @see #shouldQuoteID(String) */ public final String processID(String id) { return quote(shorten(id)); } public final String withSchema(String tableName) { final String processedTableName = processID(tableName); return isSchemaNotEmpty() ? schema + "." + processedTableName : processedTableName; } protected final boolean isSchemaNotEmpty() { return schema != null && schema.length() > 0; } public final String shorten(String id) { return Common.shorten(id, getMaxIDLength()); } public final String quote(String id) { if (shouldQuoteID(id)) { loadQuoteString(); return quote + id + quote; } return id; } /** * Determines whether or not the specified identifier should be quoted * before transmission to the underlying database. The default implementation * transforms the identifier into all-upper-case and checks the result * against {@link #getReservedWords()}. Databases with more complicated * rules regarding quoting should provide a custom implementation of this * method. * * @param id The identifier to check against the quoting rules. * @return true if the specified identifier is invalid under * the relevant quoting rules, otherwise false. */ protected boolean shouldQuoteID(String id) { return getReservedWords().contains(id.toUpperCase()); } /** * Returns the maximum length for any identifier in the underlying database. * If the database defines different maximum lengths for different identifier * types, the minimum value should be returned by this method. By * default, this just returns {@link Integer#MAX_VALUE}. * * @return The maximum identifier length for the database. */ protected int getMaxIDLength() { return Integer.MAX_VALUE; } /** * Retrieves the set of all reserved words for the underlying database. The * set returns should be speculative, meaning that it should include any * possible reserved words, not just those for a particular version. * As an implementation guideline, the {@link Set} instance returned from this * method should guarentee O(1) lookup times, otherwise ORM performance * will suffer greatly. * * @return A set of upper case reserved words specific * to the database. */ protected abstract Set getReservedWords(); /** * Flag indicating whether or not the underlying database uses case-sensetive * identifiers. This specifically affects comparisons in the {@link SchemaReader} * utility. The default value is true. Note that databases which * support both case-sensetive and case-insensetive identifiers (like MySQL) should * return true for better all-around compatibility. * * @return boolean true if identifiers are case-sensetive, * false otherwise. */ public boolean isCaseSensetive() { return true; } /** * Tells whether this exception should be ignored when running an updated statement. Typically, errors on dropping * non-existing objects should be ignored. * * @param sql * @param e the {@link java.sql.SQLException} that occured. * @throws SQLException throws the SQLException if it should not be ignored. 
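*
* A hedged sketch of an override (the SQLState test below is illustrative;
* a real provider would check its own vendor-specific codes):
*
*     @Override
*     public void handleUpdateError(String sql, SQLException e) throws SQLException {
*         if (sql.startsWith("DROP") && "42Y55".equals(e.getSQLState())) {
*             return;                              // dropping a non-existent object: ignore
*         }
*         super.handleUpdateError(sql, e);         // log and rethrow everything else
*     }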
*/ public void handleUpdateError(String sql, SQLException e) throws SQLException { sqlLogger.error("Exception executing SQL update <" + sql + ">", e); throw e; } public final PreparedStatement preparedStatement(Connection c, CharSequence sql) throws SQLException { final String sqlString = sql.toString(); onSql(sqlString); return c.prepareStatement(sqlString); } public final PreparedStatement preparedStatement(Connection c, CharSequence sql, int autoGeneratedKeys) throws SQLException { final String sqlString = sql.toString(); onSql(sqlString); return c.prepareStatement(sqlString, autoGeneratedKeys); } public final PreparedStatement preparedStatement(Connection c, CharSequence sql, int resultSetType, int resultSetConcurrency) throws SQLException { final String sqlString = sql.toString(); onSql(sqlString); return c.prepareStatement(sqlString, resultSetType, resultSetConcurrency); } public final void executeUpdate(Statement stmt, CharSequence sql) throws SQLException { final String sqlString = sql.toString(); try { onSql(sqlString); checkNotNull(stmt).executeUpdate(sqlString); } catch (SQLException e) { handleUpdateError(sqlString, e); } } public final void addSqlListener(SqlListener l) { sqlListeners.add(l); } public final void removeSqlListener(SqlListener l) { sqlListeners.remove(l); } protected final void onSql(String sql) { for (SqlListener sqlListener : sqlListeners) { sqlListener.onSql(sql); } } private static boolean isBlank(String str) { int strLen; if (str == null || (strLen = str.length()) == 0) { return true; } for (int i = 0; i < strLen; i++) { if (!Character.isWhitespace(str.charAt(i))) { return false; } } return true; } protected static class RenderFieldOptions { public final boolean renderUnique; public final boolean renderDefault; public final boolean forceNull; public RenderFieldOptions(boolean renderUnique, boolean renderDefault) { this(renderUnique, renderDefault, false); } public RenderFieldOptions(boolean renderUnique, boolean renderDefault, boolean forceNull) { this.renderUnique = renderUnique; this.renderDefault = renderDefault; this.forceNull = forceNull; } } public static interface SqlListener { void onSql(String sql); } private final static class LoggingSqlListener implements SqlListener { private final Logger logger; public LoggingSqlListener(Logger logger) { this.logger = checkNotNull(logger); } public void onSql(String sql) { logger.debug(sql); } } }
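// Usage sketch (illustrative only; "provider" stands for any concrete
// DatabaseProvider instance obtained elsewhere): the SqlListener hook can be
// used to capture every statement the provider issues, e.g. for test assertions.
//
//     final List<String> issued = new ArrayList<String>();
//     provider.addSqlListener(new DatabaseProvider.SqlListener() {
//         public void onSql(String sql) {
//             issued.add(sql);
//         }
//     });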