/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.openjpa.jdbc.schema;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.io.Writer;
import java.net.URL;
import java.security.AccessController;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import javax.sql.DataSource;
import org.apache.openjpa.conf.OpenJPAConfiguration;
import org.apache.openjpa.jdbc.conf.JDBCConfiguration;
import org.apache.openjpa.jdbc.conf.JDBCConfigurationImpl;
import org.apache.openjpa.jdbc.identifier.DBIdentifier;
import org.apache.openjpa.jdbc.sql.DBDictionary;
import org.apache.openjpa.jdbc.sql.SQLExceptions;
import org.apache.openjpa.lib.conf.Configurations;
import org.apache.openjpa.lib.jdbc.DelegatingDataSource;
import org.apache.openjpa.lib.log.Log;
import org.apache.openjpa.lib.meta.MetaDataSerializer;
import org.apache.openjpa.lib.util.Files;
import org.apache.openjpa.lib.util.J2DoPrivHelper;
import org.apache.openjpa.lib.util.Localizer;
import org.apache.openjpa.lib.util.Options;
import org.apache.openjpa.lib.util.StringUtil;
import org.apache.openjpa.util.InvalidStateException;
/**
* The SchemaTool is used to manage the database schema. Note that the
* tool never adds or drops unique constraints from existing tables, because
* JDBC {@link DatabaseMetaData} does not include information on these
* constraints.
*
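 * <p>A minimal programmatic usage sketch (assuming an already-configured
 * {@link JDBCConfiguration} named <code>conf</code> and a repository
 * {@link SchemaGroup} named <code>group</code>; the flag values are
 * illustrative only):</p>
 * <pre>
 * SchemaTool tool = new SchemaTool(conf, SchemaTool.ACTION_ADD);
 * tool.setSchemaGroup(group);  // the repository schema to act on
 * tool.setIgnoreErrors(true);  // log SQLExceptions instead of failing
 * tool.run();                  // execute the action against the database
 * tool.record();               // store the result in the SchemaFactory
 * tool.clear();                // release the DataSource
 * </pre>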
* @author Abe White
* @author Patrick Linskey
*/
public class SchemaTool {
public static final String ACTION_ADD = "add";
public static final String ACTION_DROP = "drop";
public static final String ACTION_DROP_SCHEMA = "dropSchema";
public static final String ACTION_RETAIN = "retain";
public static final String ACTION_REFRESH = "refresh";
public static final String ACTION_BUILD = "build";
public static final String ACTION_REFLECT = "reflect";
public static final String ACTION_CREATEDB = "createDB";
public static final String ACTION_DROPDB = "dropDB";
public static final String ACTION_IMPORT = "import";
public static final String ACTION_EXPORT = "export";
public static final String ACTION_DELETE_TABLE_CONTENTS = "deleteTableContents";
public static final String ACTION_EXECUTE_SCRIPT = "executeScript";
public static final String[] ACTIONS = new String[]{
ACTION_ADD,
ACTION_DROP,
ACTION_DROP_SCHEMA,
ACTION_RETAIN,
ACTION_REFRESH,
ACTION_BUILD,
ACTION_REFLECT,
ACTION_CREATEDB,
ACTION_DROPDB,
ACTION_IMPORT,
ACTION_EXPORT,
ACTION_DELETE_TABLE_CONTENTS,
ACTION_EXECUTE_SCRIPT
};
protected static final Localizer _loc = Localizer.forPackage(SchemaTool.class);
protected final JDBCConfiguration _conf;
protected final DataSource _ds;
protected final Log _log;
protected final DBDictionary _dict;
private final String _action;
private boolean _ignoreErrs = false;
private boolean _openjpaTables = false;
private boolean _dropTables = true;
private boolean _dropSeqs = true;
private boolean _pks = true;
private boolean _fks = true;
private boolean _indexes = true;
private boolean _seqs = true;
private boolean _rollbackBeforeDDL = true;
private PrintWriter _writer = null;
private SchemaGroup _group = null;
private SchemaGroup _db = null;
protected boolean _fullDB = false;
protected String _sqlTerminator = ";";
protected String _scriptToExecute = null;
/**
* Default constructor. Tools constructed this way will not have an
* action, so the {@link #run()} method will be a no-op.
*/
public SchemaTool(JDBCConfiguration conf) {
this(conf, null);
}
/**
* Construct a tool to perform the given action.
*/
public SchemaTool(JDBCConfiguration conf, String action) {
if (action != null && !Arrays.asList(ACTIONS).contains(action)) {
Configurations.configureInstance(this, conf, action, action);
}
_conf = conf;
_action = action;
_ds = ACTION_BUILD.equals(action) ? null : conf.getDataSource2(null);
_log = conf.getLog(JDBCConfiguration.LOG_SCHEMA);
// initialize this up-front; otherwise the dbdictionaryfactory might
// try to take a connection to initialize when we've already got one:
// bad news if the max pool is 1
_dict = _conf.getDBDictionaryInstance();
}
/**
 * Clean up the DataSource after run()/record().
*/
public void clear() {
if (_ds != null && _ds instanceof DelegatingDataSource) {
try {
((DelegatingDataSource)_ds).close();
} catch (Exception e) {
// no-op
}
}
}
/**
* The action supplied on construction.
*/
public String getAction() {
return _action;
}
/**
* If true, SQLExceptions thrown during schema manipulation will be
* printed but ignored.
*/
public boolean getIgnoreErrors() {
return _ignoreErrs;
}
/**
* If true, SQLExceptions thrown during schema manipulation will be
* printed but ignored.
*/
public void setIgnoreErrors(boolean ignoreErrs) {
_ignoreErrs = ignoreErrs;
}
/**
* Whether to act on special tables used by OpenJPA components
* for bookkeeping.
*/
public boolean getOpenJPATables() {
return _openjpaTables;
}
/**
* Whether to act on special tables used by OpenJPA components
* for bookkeeping.
*/
public void setOpenJPATables(boolean openjpaTables) {
_openjpaTables = openjpaTables;
}
/**
* If true, tables that appear to be unused will be dropped. Defaults to
* true.
*/
public boolean getDropTables() {
return _dropTables;
}
/**
* If true, tables that appear to be unused will be dropped. Defaults to
* true.
*/
public void setDropTables(boolean dropTables) {
_dropTables = dropTables;
}
/**
* If true, sequences that appear to be unused will be dropped. Defaults
* to true.
*/
public boolean getDropSequences() {
return _dropSeqs;
}
/**
* If true, sequences that appear to be unused will be dropped. Defaults
* to true.
*/
public void setDropSequences(boolean dropSeqs) {
_dropSeqs = dropSeqs;
if (dropSeqs)
setSequences(true);
}
/**
* If true, rollback will be performed before each DDL statement is executed. Defaults to true.
*/
public boolean getRollbackBeforeDDL() {
return _rollbackBeforeDDL;
}
/**
* If true, rollback will be performed before each DDL statement is executed. Defaults to true.
*/
public void setRollbackBeforeDDL(boolean rollbackBeforeDDL) {
_rollbackBeforeDDL = rollbackBeforeDDL;
}
/**
* Whether sequences should be manipulated. Defaults to true.
*/
public boolean getSequences() {
return _seqs;
}
/**
* Whether sequences should be manipulated. Defaults to true.
*/
public void setSequences(boolean seqs) {
_seqs = seqs;
}
/**
* Whether indexes on existing tables should be manipulated.
* Defaults to true.
*/
public boolean getIndexes() {
return _indexes;
}
/**
* Whether indexes on existing tables should be manipulated.
* Defaults to true.
*/
public void setIndexes(boolean indexes) {
_indexes = indexes;
}
/**
* Whether foreign keys on existing tables should be manipulated.
* Defaults to true.
*/
public boolean getForeignKeys() {
return _fks;
}
/**
* Whether foreign keys on existing tables should be manipulated.
* Defaults to true.
*/
public void setForeignKeys(boolean fks) {
_fks = fks;
}
/**
* Whether primary keys on existing tables should be manipulated.
* Defaults to true.
*/
public boolean getPrimaryKeys() {
return _pks;
}
/**
* Whether primary keys on existing tables should be manipulated.
* Defaults to true.
*/
public void setPrimaryKeys(boolean pks) {
_pks = pks;
}
/**
* The stream to write to for the creation of SQL scripts. If the
* stream is non-null, all SQL will be written to this stream rather than
* executed against the database.
*/
public Writer getWriter() {
return _writer;
}
/**
* The stream to write to for the creation of SQL scripts. If the
* stream is non-null, all SQL will be written to this stream rather than
* executed against the database.
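 * <p>For example, a sketch that scripts the SQL to a file (the file
 * name is illustrative only):</p>
 * <pre>
 * tool.setWriter(new java.io.FileWriter("schema.sql"));
 * tool.run(); // SQL is written to schema.sql rather than executed
 * </pre>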
*/
public void setWriter(Writer writer) {
if (writer == null)
_writer = null;
else if (writer instanceof PrintWriter)
_writer = (PrintWriter) writer;
else
_writer = new PrintWriter(writer);
}
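/**
 * Set the statement terminator appended to each SQL statement written
 * to the script writer. Defaults to ";".
 */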
public void setSQLTerminator(String t) {
_sqlTerminator = t;
}
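/**
 * Set the resource path of the SQL script to run under the
 * executeScript action. The script is loaded as a classloader resource.
 */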
public void setScriptToExecute(String scriptToExecute) {
_scriptToExecute = scriptToExecute;
}
/**
* Return the schema group the tool will act on.
*/
public SchemaGroup getSchemaGroup() {
return _group;
}
/**
* Set the schema group the tool will act on.
*/
public void setSchemaGroup(SchemaGroup group) {
_group = group;
}
///////////
// Actions
///////////
/**
* Run the tool action.
*/
public void run()
throws SQLException {
if (_action == null)
return;
if (ACTION_ADD.equals(_action))
add();
else if (ACTION_DROP.equals(_action))
drop();
else if (ACTION_DROP_SCHEMA.equals(_action))
dropSchema();
else if (ACTION_RETAIN.equals(_action))
retain();
else if (ACTION_REFRESH.equals(_action))
refresh();
else if (ACTION_BUILD.equals(_action))
build();
else if (ACTION_CREATEDB.equals(_action))
createDB();
else if (ACTION_DROPDB.equals(_action))
dropDB();
else if (ACTION_DELETE_TABLE_CONTENTS.equals(_action))
deleteTableContents();
else if (ACTION_EXECUTE_SCRIPT.equals(_action)) {
executeScript();
}
}
/**
* Adds any components present in the schema repository but absent from
* the database.
*/
protected void add()
throws SQLException {
add(getDBSchemaGroup(false), assertSchemaGroup());
}
/**
* Drops all schema components in the schema repository that also exist
* in the database.
*/
protected void drop()
throws SQLException {
drop(getDBSchemaGroup(false), assertSchemaGroup());
}
/**
* Drops all schema components in the schema repository.
*/
protected void dropSchema()
throws SQLException {
drop(getDBSchemaGroup(false), assertSchemaGroup(), false);
}
/**
* Drops database components that are not mentioned in the schema
* repository.
*/
protected void retain()
throws SQLException {
retain(getDBSchemaGroup(true), assertSchemaGroup(),
getDropTables(), getDropSequences());
}
/**
* Adds any components present in the schema repository but absent from
* the database, and drops unused database components.
*/
protected void refresh()
throws SQLException {
SchemaGroup local = assertSchemaGroup();
SchemaGroup db = getDBSchemaGroup(true);
retain(db, local, getDropTables(), getDropSequences());
add(db, local);
}
/**
* Re-execute all SQL used for the creation of the current database;
* this action is usually used when creating SQL scripts.
*/
protected void createDB()
throws SQLException {
SchemaGroup group = new SchemaGroup();
group.addSchema();
add(group, getDBSchemaGroup(true));
}
/**
 * Execute SQL to build the schema defined in the XML repository,
 * without considering the current state of the database; this action
 * is usually used when creating SQL scripts.
 */
protected void build()
throws SQLException {
SchemaGroup group = new SchemaGroup();
group.addSchema();
buildSchema(group, assertSchemaGroup(), true);
}
/**
* Drop the current database.
*/
protected void dropDB()
throws SQLException {
retain(getDBSchemaGroup(true), new SchemaGroup(), true, true);
}
/**
 * Issue DELETE statements against all known tables.
*/
protected void deleteTableContents()
throws SQLException {
SchemaGroup group = getSchemaGroup();
Schema[] schemas = group.getSchemas();
Collection<Table> tables = new LinkedHashSet<>();
for (Schema schema : schemas) {
Table[] ts = schema.getTables();
for (Table t : ts) {
tables.add(t);
}
}
Table[] tableArray = tables.toArray(new Table[tables.size()]);
Connection conn = _ds.getConnection();
try {
String[] sql = _conf.getDBDictionaryInstance()
.getDeleteTableContentsSQL(tableArray, conn);
if (!executeSQL(sql)) {
_log.warn(_loc.get("delete-table-contents"));
}
} finally {
closeConnection(conn);
}
}
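/**
 * Run the SQL script configured via {@link #setScriptToExecute}. Lines
 * beginning with "--", "/*", or "//" are treated as comments and
 * skipped; a trailing semicolon is stripped from each statement before
 * execution.
 */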
protected void executeScript() throws SQLException {
if (_scriptToExecute == null) {
_log.warn(_loc.get("generating-execute-script-not-defined"));
return;
}
URL url = AccessController.doPrivileged(
J2DoPrivHelper.getResourceAction(_conf.getClassResolverInstance().
getClassLoader(SchemaTool.class, null), _scriptToExecute));
if (url == null) {
_log.error(_loc.get("generating-execute-script-not-found", _scriptToExecute));
return;
}
_log.info(_loc.get("generating-execute-script", _scriptToExecute));
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(url.openStream()));
String sql;
List<String> script = new ArrayList<>();
while ((sql = reader.readLine()) != null) {
sql = sql.trim();
if (sql.startsWith("--") || sql.startsWith("/*") || sql.startsWith("//")) {
continue;
}
// a ';' can appear inside a string literal, so only strip a
// trailing statement terminator
if (sql.endsWith(";")) {
    sql = sql.substring(0, sql.length() - 1);
}
if (sql.isEmpty()) {
continue;
}
script.add(sql);
}
executeSQL(script.toArray(new String[script.size()]));
} catch (IOException e) {
_log.error(e.getMessage(), e);
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException e) {
_log.error(e.getMessage(), e);
}
}
}
/**
* Record the changes made to the DB in the current {@link SchemaFactory}.
*/
public void record() {
if (_db != null && _writer == null)
_conf.getSchemaFactoryInstance().storeSchema(_db);
}
/**
* Adds all database components in the repository schema that are not
* present in the given database schema to the database.
*/
protected void add(SchemaGroup db, SchemaGroup repos)
throws SQLException {
buildSchema(db, repos, true);
}
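/**
 * Add to the database any components in <code>repos</code> that are
 * missing from <code>db</code>. Ordering matters here: sequences and
 * columns come first, then primary keys, new tables, indexes, unique
 * constraints, and finally foreign keys, so that each component's
 * dependencies exist by the time it is created.
 */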
protected void buildSchema(SchemaGroup db, SchemaGroup repos, boolean considerDatabaseState) throws SQLException {
// add sequences
Schema[] schemas = repos.getSchemas();
Schema schema;
if (_seqs) {
Sequence[] seqs;
for (Schema value : schemas) {
seqs = value.getSequences();
for (Sequence seq : seqs) {
if (considerDatabaseState && db.findSequence(value, seq.getQualifiedPath()) != null) {
continue;
}
if (createSequence(seq)) {
schema = db.getSchema(seq.getSchemaIdentifier());
if (schema == null)
schema = db.addSchema(seq.getSchemaIdentifier());
schema.importSequence(seq);
}
else
_log.warn(_loc.get("add-seq", seq));
}
}
}
// order is important in this method; start with columns
Table[] tabs;
Table dbTable = null;
Column[] cols;
Column col;
DBIdentifier defaultSchemaName = DBIdentifier.newSchema(_dict.getDefaultSchemaName());
for (Schema schema2 : schemas) {
tabs = schema2.getTables();
for (Table tab : tabs) {
cols = tab.getColumns();
if (considerDatabaseState) {
dbTable = db.findTable(schema2, tab.getQualifiedPath(), defaultSchemaName);
}
for (Column column : cols) {
if (dbTable != null) {
DBIdentifier colName = column.getIdentifier();
col = dbTable.getColumn(colName);
if (col == null) {
if (addColumn(column))
dbTable.importColumn(column);
else
_log.warn(_loc.get("add-col", column,
tab));
}
else if (!column.equalsColumn(_dict, col)) {
_log.warn(_loc.get("bad-col", new Object[]{
col, dbTable, col.getDescription(),
column.getDescription()}));
}
}
}
}
}
// primary keys
if (_pks) {
PrimaryKey pk;
for (Schema value : schemas) {
tabs = value.getTables();
for (Table tab : tabs) {
pk = tab.getPrimaryKey();
if (considerDatabaseState) {
dbTable = db.findTable(value, tab.getQualifiedPath());
}
if (pk != null && !pk.isLogical() && dbTable != null) {
if (dbTable.getPrimaryKey() == null
&& addPrimaryKey(pk))
dbTable.importPrimaryKey(pk);
else if (dbTable.getPrimaryKey() == null)
_log.warn(_loc.get("add-pk", pk, tab));
else if (!pk.equalsPrimaryKey(dbTable.getPrimaryKey()))
_log.warn(_loc.get("bad-pk",
dbTable.getPrimaryKey(), dbTable));
}
}
}
}
// tables
Set<Table> newTables = new HashSet<>();
for (Schema schema1 : schemas) {
tabs = schema1.getTables();
for (Table tab : tabs) {
if (considerDatabaseState && db.findTable(schema1, tab.getQualifiedPath()) != null) {
continue;
}
if (createTable(tab)) {
newTables.add(tab);
schema = db.getSchema(tab.getSchemaIdentifier());
if (schema == null)
schema = db.addSchema(tab.getSchemaIdentifier());
schema.importTable(tab);
}
else
_log.warn(_loc.get("add-table", tab));
}
}
// indexes
Index[] idxs;
Index idx;
for (Schema element : schemas) {
tabs = element.getTables();
for (Table tab : tabs) {
// create indexes on new tables even if indexes
// have been turned off
if (!_indexes && !newTables.contains(tab))
continue;
idxs = tab.getIndexes();
if (considerDatabaseState) {
dbTable = db.findTable(element, tab.getQualifiedPath());
}
for (Index index : idxs) {
if (dbTable != null) {
idx = findIndex(dbTable, index);
if (idx == null) {
if (createIndex(index, dbTable, tab.getUniques()))
dbTable.importIndex(index);
else
_log.warn(_loc.get("add-index", index,
tab));
}
else if (!index.equalsIndex(idx))
_log.warn(_loc.get("bad-index", idx, dbTable));
}
}
}
}
// Unique Constraints on group of columns
Unique[] uniques;
for (Schema item : schemas) {
tabs = item.getTables();
for (Table tab : tabs) {
// create unique constraints only on new tables
if (!newTables.contains(tab)) {
continue;
}
uniques = tab.getUniques();
if (uniques == null || uniques.length == 0)
continue;
if (considerDatabaseState) {
dbTable = db.findTable(tab);
}
if (dbTable == null)
continue;
for (Unique unique : uniques) {
dbTable.importUnique(unique);
}
}
}
// foreign keys
ForeignKey[] fks;
ForeignKey fk;
for (Schema value : schemas) {
tabs = value.getTables();
for (Table tab : tabs) {
// create foreign keys on new tables even if fks
// have been turned off
if (!_fks && !newTables.contains(tab)) {
continue;
}
fks = tab.getForeignKeys();
if (considerDatabaseState) {
dbTable = db.findTable(value, tab.getQualifiedPath());
}
for (ForeignKey foreignKey : fks) {
if (!foreignKey.isLogical() && dbTable != null) {
fk = findForeignKey(dbTable, foreignKey);
if (fk == null) {
if (addForeignKey(foreignKey))
dbTable.importForeignKey(foreignKey);
else
_log.warn(_loc.get("add-fk",
foreignKey, tab));
}
else if (!foreignKey.equalsForeignKey(fk))
_log.warn(_loc.get("bad-fk", fk, dbTable));
}
}
}
}
}
/**
* Drops all database components that are in the given database schema
* but not in the repository schema.
*/
protected void retain(SchemaGroup db, SchemaGroup repos, boolean tables,
boolean sequences)
throws SQLException {
Schema[] schemas = db.getSchemas();
if (_seqs && sequences) {
Sequence[] seqs;
for (Schema schema : schemas) {
seqs = schema.getSequences();
for (Sequence seq : seqs) {
if (!isDroppable(seq))
continue;
if (repos.findSequence(seq) == null) {
if (dropSequence(seq))
schema.removeSequence(seq);
else
_log.warn(_loc.get("drop-seq", seq));
}
}
}
}
// order is important in this method; start with foreign keys
Table[] tabs;
Table reposTable;
if (_fks) {
ForeignKey[] fks;
ForeignKey fk;
for (Schema schema : schemas) {
tabs = schema.getTables();
for (Table tab : tabs) {
if (!isDroppable(tab))
continue;
fks = tab.getForeignKeys();
reposTable = repos.findTable(tab);
if (!tables && reposTable == null)
continue;
for (ForeignKey foreignKey : fks) {
if (foreignKey.isLogical())
continue;
fk = null;
if (reposTable != null)
fk = findForeignKey(reposTable, foreignKey);
if (reposTable == null || fk == null
|| !foreignKey.equalsForeignKey(fk)) {
if (dropForeignKey(foreignKey))
tab.removeForeignKey(foreignKey);
else
_log.warn(_loc.get("drop-fk", foreignKey,
tab));
}
}
}
}
}
// primary keys
if (_pks) {
PrimaryKey pk;
for (Schema schema : schemas) {
tabs = schema.getTables();
for (Table tab : tabs) {
if (!isDroppable(tab))
continue;
pk = tab.getPrimaryKey();
if (pk != null && pk.isLogical())
continue;
reposTable = repos.findTable(tab);
if (pk != null && reposTable != null
&& (reposTable.getPrimaryKey() == null
|| !pk.equalsPrimaryKey(reposTable.getPrimaryKey()))) {
if (dropPrimaryKey(pk))
tab.removePrimaryKey();
else
_log.warn(_loc.get("drop-pk", pk, tab));
}
}
}
}
// columns
Column[] cols;
Column col;
Collection<Table> drops = new LinkedList<>();
for (Schema value : schemas) {
tabs = value.getTables();
for (Table tab : tabs) {
if (!isDroppable(tab))
continue;
cols = tab.getColumns();
reposTable = repos.findTable(tab);
if (reposTable != null) {
for (Column column : cols) {
col = reposTable.getColumn(column.getIdentifier());
if (col == null || !column.equalsColumn(_dict, col)) {
if (tab.getColumns().length == 1)
drops.add(tab);
else if (dropColumn(column))
tab.removeColumn(column);
else
_log.warn(_loc.get("drop-col", column,
tab));
}
}
}
}
}
// now tables
if (tables) {
for (Schema schema : schemas) {
tabs = schema.getTables();
for (Table tab : tabs)
if (isDroppable(tab)
&& repos.findTable(tab) == null)
drops.add(tab);
}
}
dropTables(drops, db);
}
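/**
 * Drops all database components in the given repository schema that
 * also exist in the database.
 */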
protected void drop(SchemaGroup db, SchemaGroup repos)
throws SQLException {
drop(db, repos, true);
}
/**
* Drops all database components in the given repository schema.
*/
private void drop(SchemaGroup db, SchemaGroup repos, boolean considerDatabaseState)
throws SQLException {
// drop sequences
Schema[] schemas = repos.getSchemas();
if (_seqs) {
Sequence[] seqs;
Sequence dbSeq;
for (Schema schema : schemas) {
seqs = schema.getSequences();
for (Sequence seq : seqs) {
if (!isDroppable(seq))
continue;
dbSeq = db.findSequence(seq);
if (dbSeq != null) {
if (dropSequence(seq))
dbSeq.getSchema().removeSequence(dbSeq);
else
_log.warn(_loc.get("drop-seq", seq));
}
else if (_writer != null) {
dropSequence(seq);
}
}
}
}
// calculate tables to drop; we can only drop tables if we're sure
// the user listed the entire table definition in the stuff they want
// dropped; else they might just want to drop a few columns
Collection<Table> drops = new LinkedList<>();
Table[] tabs;
Table dbTable;
Column[] dbCols;
for (Schema value : schemas) {
tabs = value.getTables();
tables:
for (Table tab : tabs) {
if (!isDroppable(tab))
continue;
if (!considerDatabaseState) {
drops.add(tab);
continue;
}
dbTable = db.findTable(tab);
if (dbTable == null) {
if (_writer != null) {
drops.add(tab);
}
continue;
}
dbCols = dbTable.getColumns();
for (Column dbCol : dbCols) {
if (!dbCol.getIdentifier().getName().equals(_dict.getIdentityColumnName()) &&
!tab.containsColumn(dbCol))
continue tables;
}
drops.add(tab);
}
}
// order is important in this method; start with foreign keys mentioned
// in the drop schema
if (_fks) {
ForeignKey[] fks;
ForeignKey fk;
for (Schema schema : schemas) {
tabs = schema.getTables();
for (Table tab : tabs) {
if (!isDroppable(tab))
continue;
fks = tab.getForeignKeys();
dbTable = db.findTable(tab);
for (ForeignKey foreignKey : fks) {
if (foreignKey.isLogical())
continue;
fk = null;
if (dbTable != null)
fk = findForeignKey(dbTable, foreignKey);
if (dbTable == null || fk == null)
    continue;
if (dropForeignKey(foreignKey))
    dbTable.removeForeignKey(fk);
else
    _log.warn(_loc.get("drop-fk", foreignKey, tab));
}
}
}
// also drop imported foreign keys for tables that will be dropped
for (Table tab : drops) {
dbTable = db.findTable(tab);
if (dbTable == null)
continue;
fks = db.findExportedForeignKeys(dbTable.getPrimaryKey());
for (ForeignKey foreignKey : fks) {
if (dropForeignKey(foreignKey))
dbTable.removeForeignKey(foreignKey);
else
_log.warn(_loc.get("drop-fk", foreignKey, dbTable));
}
}
}
// drop the tables we calculated above
dropTables(drops, db);
if (considerDatabaseState) {
// columns
Column[] cols;
Column col;
for (Schema schema : schemas) {
tabs = schema.getTables();
for (Table tab : tabs) {
if (!isDroppable(tab))
continue;
cols = tab.getColumns();
dbTable = db.findTable(tab);
for (Column column : cols) {
col = null;
if (dbTable != null)
col = dbTable.getColumn(column.getIdentifier());
if (dbTable == null || col == null)
continue;
if (dropColumn(column)) {
dbTable.removeColumn(col);
}
}
}
}
}
}
/**
* Return true if the table is droppable.
*/
protected boolean isDroppable(Table table) {
return _openjpaTables
|| (!DBIdentifier.toUpper(table.getIdentifier()).getName().startsWith("OPENJPA_")
&& !DBIdentifier.toUpper(table.getIdentifier()).getName().startsWith("JDO_")); // legacy
}
/**
* Return true if the sequence is droppable.
*/
protected boolean isDroppable(Sequence seq) {
return _openjpaTables
|| (!DBIdentifier.toUpper(seq.getIdentifier()).getName().startsWith("OPENJPA_")
&& !DBIdentifier.toUpper(seq.getIdentifier()).getName().startsWith("JDO_")); // legacy
}
/**
* Find an index in the given table that matches the given one.
*/
protected Index findIndex(Table dbTable, Index idx) {
Index[] idxs = dbTable.getIndexes();
for (Index index : idxs)
if (idx.columnsMatch(index.getColumns()))
return index;
return null;
}
/**
* Find a foreign key in the given table that matches the given one.
*/
protected ForeignKey findForeignKey(Table dbTable, ForeignKey fk) {
if (fk.getConstantColumns().length > 0
|| fk.getConstantPrimaryKeyColumns().length > 0)
return null;
ForeignKey[] fks = dbTable.getForeignKeys();
for (ForeignKey foreignKey : fks)
if (fk.columnsMatch(foreignKey.getColumns(),
foreignKey.getPrimaryKeyColumns()))
return foreignKey;
return null;
}
/**
* Remove the given collection of tables from the database schema. Orders
* the removals according to foreign key constraints on the tables.
*/
protected void dropTables(Collection<Table> tables, SchemaGroup change)
throws SQLException {
if (tables.isEmpty())
return;
Table changeTable;
for (Table table : tables) {
    if (dropTable(table)) {
        changeTable = change.findTable(table);
        if (changeTable != null)
            changeTable.getSchema().removeTable(changeTable);
    }
    else
        _log.warn(_loc.get("drop-table", table));
}
}
/**
* Add the given table to the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean createTable(Table table)
throws SQLException {
return executeSQL(_dict.getCreateTableSQL(table, _db));
}
/**
* Drop the given table from the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean dropTable(Table table)
throws SQLException {
return executeSQL(_dict.getDropTableSQL(table));
}
/**
* Add the given sequence to the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean createSequence(Sequence seq)
throws SQLException {
return executeSQL(_dict.getCreateSequenceSQL(seq));
}
/**
* Drop the given sequence from the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean dropSequence(Sequence seq)
throws SQLException {
return executeSQL(_dict.getDropSequenceSQL(seq));
}
/**
* Add the given index to the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean createIndex(Index idx, Table table)
throws SQLException {
return createIndex(idx, table, null);
}
public boolean createIndex(Index idx, Table table, Unique[] uniques)
throws SQLException {
// Informix will automatically create a unique index for the
// primary key, so don't create another one
if (!_dict.needsToCreateIndex(idx, table, uniques))
    return false;
int max = _dict.maxIndexesPerTable;
int len = table.getIndexes().length;
if (table.getPrimaryKey() != null)
len += table.getPrimaryKey().getColumns().length;
if (len >= max) {
_log.warn(_loc.get("too-many-indexes", idx, table, max + ""));
return false;
}
return executeSQL(_dict.getCreateIndexSQL(idx));
}
/**
* Drop the given index from the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean dropIndex(Index idx)
throws SQLException {
return executeSQL(_dict.getDropIndexSQL(idx));
}
/**
* Add the given column to the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean addColumn(Column col)
throws SQLException {
return executeSQL(_dict.getAddColumnSQL(col));
}
/**
* Drop the given column from the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean dropColumn(Column col)
throws SQLException {
return executeSQL(_dict.getDropColumnSQL(col));
}
/**
* Add the given primary key to the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean addPrimaryKey(PrimaryKey pk)
throws SQLException {
return executeSQL(_dict.getAddPrimaryKeySQL(pk));
}
/**
* Drop the given primary key from the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean dropPrimaryKey(PrimaryKey pk)
throws SQLException {
return executeSQL(_dict.getDropPrimaryKeySQL(pk));
}
/**
* Add the given foreign key to the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean addForeignKey(ForeignKey fk)
throws SQLException {
return executeSQL(_dict.getAddForeignKeySQL(fk));
}
/**
* Drop the given foreign key from the database schema.
*
* @return true if the operation was successful, false otherwise
*/
public boolean dropForeignKey(ForeignKey fk)
throws SQLException {
Connection conn = _ds.getConnection();
try {
return executeSQL(_dict.getDropForeignKeySQL(fk, conn));
} finally {
closeConnection(conn);
}
}
/**
* Return the database schema.
*/
public SchemaGroup getDBSchemaGroup() {
try {
return getDBSchemaGroup(true);
} catch (SQLException se) {
throw SQLExceptions.getStore(se, _dict);
}
}
/**
* Set the database schema.
*/
public void setDBSchemaGroup(SchemaGroup db) {
_db = db;
if (db != null)
_fullDB = true;
}
/**
* Return the database schema.
*
* @param full if false, only the tables named in the set schema
* repository will be generated
*/
protected SchemaGroup getDBSchemaGroup(boolean full)
throws SQLException {
if (_db == null || (full && !_fullDB)) {
SchemaGenerator gen = new SchemaGenerator(_conf);
gen.setPrimaryKeys(_pks);
gen.setForeignKeys(_fks);
gen.setIndexes(_indexes);
if (full)
gen.generateSchemas();
else {
// generate only the tables in the given repository
// group; some may not exist yet, which is OK; we just need
// to make sure we can detect the changes to the ones that
// do exist
Collection<DBIdentifier> tables = new LinkedList<>();
SchemaGroup group = assertSchemaGroup();
Schema[] schemas = group.getSchemas();
Table[] tabs;
for (Schema schema : schemas) {
tabs = schema.getTables();
for (Table tab : tabs) {
if (DBIdentifier.isNull(tab.getSchemaIdentifier())) {
tables.add(tab.getIdentifier());
}
else {
DBIdentifier sName = tab.getFullIdentifier();
tables.add(sName);
}
}
}
if (!tables.isEmpty())
gen.generateSchemas(tables.toArray(new DBIdentifier[tables.size()]));
}
_db = gen.getSchemaGroup();
}
return _db;
}
protected SchemaGroup assertSchemaGroup() {
SchemaGroup local = getSchemaGroup();
if (local == null)
throw new InvalidStateException(_loc.get("tool-norepos"));
return local;
}
/////////////
// Utilities
/////////////
/**
* Executes the given array of non-selecting SQL statements, correctly
* logging the SQL calls and optionally ignoring errors.
*
* @return true if there was SQL to execute and the calls were
* successful, false otherwise
*/
protected boolean executeSQL(String[] sql)
throws SQLException {
// if no sql, probably b/c dictionary doesn't support operation
if (sql.length == 0)
return false;
boolean err = false;
if (_writer == null) {
// this is outside the try-catch because a failure here is
// really bad, and should not be ignored.
Connection conn = _ds.getConnection();
Statement statement = null;
boolean wasAuto = true;
try {
if (_rollbackBeforeDDL) {
wasAuto = conn.getAutoCommit();
if (!wasAuto) {
conn.setAutoCommit(true);
}
}
for (String s : sql) {
try {
if (_rollbackBeforeDDL) {
// some connections require that rollback be
// called on the connection before any DDL statements
// can be run on it, even when autocommit is on.
// This is sometimes because the connection does not
// allow DDL statements when there are multiple
// commands issued on the connection, and the
// connection pool may have issued some validation SQL.
try {
conn.rollback();
}
catch (Exception e) {
}
}
statement = conn.createStatement();
statement.executeUpdate(s);
// some connections seem to require an explicit
// commit for DDL statements, even when autocommit
// is on. The DataDirect drivers seem to suffer from
// this limitation.
try {
conn.commit();
}
catch (Exception e) {
}
}
catch (SQLException se) {
err = true;
handleException(se);
}
finally {
if (statement != null)
try {
statement.close();
}
catch (SQLException se) {
}
}
}
}
finally {
if (_rollbackBeforeDDL && !wasAuto) {
conn.setAutoCommit(false);
}
try {
closeConnection(conn);
} catch (SQLException se) {
//X TODO why catch silently?
}
}
} else {
for (String s : sql) {
_writer.println(s + _sqlTerminator);
}
_writer.flush();
}
return !err;
}
/**
* Handle the given exception, logging it and optionally ignoring it,
* depending on the flags this SchemaTool was created with.
*/
protected void handleException(SQLException sql)
throws SQLException {
if (!_ignoreErrs)
throw sql;
_log.warn(sql.getMessage(), sql);
}
////////
// Main
////////
/**
 * Usage: java org.apache.openjpa.jdbc.schema.SchemaTool [option]*
 * [-action/-a &lt;add | retain | drop | refresh | createDB | dropDB
 * | build | reflect | import | export&gt;]
 * &lt;.schema file or resource&gt;*
 * Where the following options are recognized.
 * <ul>
 * <li><i>-properties/-p &lt;properties file or resource&gt;</i>: The
 * path or resource name of an OpenJPA properties file containing
 * information such as the license key and connection data as
 * outlined in {@link JDBCConfiguration}. Optional.</li>
 * <li><i>-&lt;property name&gt; &lt;property value&gt;</i>: All bean
 * properties of the OpenJPA {@link JDBCConfiguration} can be set by
 * using their names and supplying a value. For example:
 * <code>-licenseKey adslfja83r3lkadf</code></li>
 * <li><i>-ignoreErrors/-i &lt;true/t | false/f&gt;</i>: If false, an
 * exception will be thrown if the tool encounters any database
 * exceptions; defaults to false.</li>
 * <li><i>-file/-f &lt;stdout | output file or resource&gt;</i>: Use this
 * option to write a SQL script for the planned schema modifications,
 * rather than committing them to the database. This option also
 * applies to the export and reflect actions.</li>
 * <li><i>-openjpaTables/-ot &lt;true/t | false/f&gt;</i>: Under the
 * reflect action, whether to reflect on tables with
 * the name OPENJPA_*. Under other actions, whether to
 * drop such tables. Defaults to false.</li>
 * <li><i>-dropTables/-dt &lt;true/t | false/f&gt;</i>: Set this option to
 * true to drop tables that appear to be unused during
 * retain and refresh actions. Defaults to true.</li>
 * <li><i>-dropSequences/-dsq &lt;true/t | false/f&gt;</i>: Set this option
 * to true to drop sequences that appear to be unused during
 * retain and refresh actions. Defaults to true.</li>
 * <li><i>-rollbackBeforeDDL/-rbddl &lt;true/t | false/f&gt;</i>: Set this
 * option to true to send an initial rollback on the connection before
 * any DDL statement is sent. Defaults to true.</li>
 * <li><i>-primaryKeys/-pk &lt;true/t | false/f&gt;</i>: Whether primary
 * keys on existing tables are manipulated. Defaults to true.</li>
 * <li><i>-foreignKeys/-fks &lt;true/t | false/f&gt;</i>: Whether foreign
 * keys on existing tables are manipulated. Defaults to true.</li>
 * <li><i>-indexes/-ix &lt;true/t | false/f&gt;</i>: Whether indexes
 * on existing tables are manipulated. Defaults to true.</li>
 * <li><i>-sequences/-sq &lt;true/t | false/f&gt;</i>: Whether to
 * manipulate sequences. Defaults to true.</li>
 * <li><i>-record/-r &lt;true/t | false/f&gt;</i>: Set this option to
 * false to prevent writing the schema changes to the
 * current {@link SchemaFactory}.</li>
 * </ul>
 * Actions can be composed in a comma-separated list. The various actions
 * are as follows.
 * <ul>
 * <li><i>add</i>: Bring the schema up-to-date with the latest
 * changes to the schema XML data by adding tables, columns,
 * indexes, etc. This action never drops any data. This is the
 * default action.</li>
 * <li><i>retain</i>: Keep all schema components in the schema XML, but
 * drop the rest from the database. This action never adds any data.</li>
 * <li><i>drop</i>: Drop all the schema components in the schema XML.</li>
 * <li><i>refresh</i>: Equivalent to retain, then add.</li>
 * <li><i>createDB</i>: Execute SQL to re-create the current database.
 * This action is typically used in conjunction with the
 * file option.</li>
 * <li><i>build</i>: Execute SQL to build the schema defined in the XML.
 * Because it doesn't take the current database schema into account,
 * this action is typically used in conjunction with the
 * file option.</li>
 * <li><i>reflect</i>: Reflect on the current database schema. Write the
 * schema's XML representation to the file specified with the
 * file option, or to stdout if no file is given.</li>
 * <li><i>dropDB</i>: Execute SQL to drop the current database. This
 * action implies dropTables.</li>
 * <li><i>deleteTableContents</i>: Execute SQL to delete all rows from
 * all tables that OpenJPA knows about.</li>
 * <li><i>import</i>: Import the given XML schema definition into the
 * current {@link SchemaFactory}.</li>
 * <li><i>export</i>: Export the current {@link SchemaFactory}'s recorded
 * schema to an XML schema definition file.</li>
 * </ul>
 * Examples:
 * <ul>
 * <li>Write a script to stdout to re-create the current database
 * schema:
 * <code>java org.apache.openjpa.jdbc.schema.SchemaTool -f stdout
 * -a createDB</code></li>
 * <li>Drop the current database schema:
 * <code>java org.apache.openjpa.jdbc.schema.SchemaTool
 * -a dropDB</code></li>
 * <li>Refresh the schema and delete all records in all tables:
 * <code>java org.apache.openjpa.jdbc.schema.SchemaTool
 * -a refresh,deleteTableContents</code></li>
 * <li>Create a schema based on an XML schema definition file:
 * <code>java org.apache.openjpa.jdbc.schema.SchemaTool
 * myschema.xml</code></li>
 * </ul>
 */
public static void main(String[] args)
throws IOException, SQLException {
Options opts = new Options();
final String[] arguments = opts.setFromCmdLine(args);
boolean ret = Configurations.runAgainstAllAnchors(opts,
new Configurations.Runnable() {
@Override
public boolean run(Options opts) throws Exception {
JDBCConfiguration conf = new JDBCConfigurationImpl();
try {
return SchemaTool.run(conf, arguments, opts);
} finally {
conf.close();
}
}
});
if (!ret) {
// START - ALLOW PRINT STATEMENTS
System.out.println(_loc.get("tool-usage"));
// STOP - ALLOW PRINT STATEMENTS
}
}
/**
* Run the tool. Returns false if any invalid options were given.
*
* @see #main
*/
public static boolean run(JDBCConfiguration conf, String[] args,
Options opts)
throws IOException, SQLException {
Flags flags = new Flags();
flags.dropTables = opts.removeBooleanProperty
("dropTables", "dt", flags.dropTables);
flags.dropSequences = opts.removeBooleanProperty
("dropSequences", "dsq", flags.dropSequences);
flags.rollbackBeforeDDL = opts.removeBooleanProperty
("rollbackBeforeDDL", "rbddl", flags.rollbackBeforeDDL);
flags.ignoreErrors = opts.removeBooleanProperty
("ignoreErrors", "i", flags.ignoreErrors);
flags.openjpaTables = opts.removeBooleanProperty
("openjpaTables", "ot", flags.openjpaTables);
flags.primaryKeys = opts.removeBooleanProperty
("primaryKeys", "pk", flags.primaryKeys);
flags.foreignKeys = opts.removeBooleanProperty
("foreignKeys", "fks", flags.foreignKeys);
flags.indexes = opts.removeBooleanProperty
("indexes", "ix", flags.indexes);
flags.sequences = opts.removeBooleanProperty
("sequences", "sq", flags.sequences);
flags.record = opts.removeBooleanProperty("record", "r", flags.record);
String fileName = opts.removeProperty("file", "f", null);
String schemas = opts.removeProperty("s");
if (schemas != null)
opts.setProperty("schemas", schemas);
String[] actions = opts.removeProperty("action", "a", flags.action)
.split(",");
// setup a configuration instance with cmd-line info
Configurations.populateConfiguration(conf, opts);
// create script writer
ClassLoader loader = conf.getClassResolverInstance().
getClassLoader(SchemaTool.class, null);
flags.writer = Files.getWriter(fileName, loader);
boolean returnValue = true;
for (String action : actions) {
flags.action = action;
returnValue &= run(conf, args, flags, loader);
}
return returnValue;
}
/**
* Run the tool. Return false if invalid options were given.
*/
public static boolean run(JDBCConfiguration conf, String[] args,
Flags flags, ClassLoader loader)
throws IOException, SQLException {
Log log = conf.getLog(OpenJPAConfiguration.LOG_TOOL);
if (ACTION_REFLECT.equals(flags.action)) {
if (args.length > 0)
return false;
if (flags.writer == null)
flags.writer = new PrintWriter(System.out);
SchemaGenerator gen = new SchemaGenerator(conf);
gen.setPrimaryKeys(flags.primaryKeys);
gen.setIndexes(flags.indexes);
gen.setForeignKeys(flags.foreignKeys);
gen.setSequences(flags.sequences);
gen.setOpenJPATables(flags.openjpaTables);
String schemas = conf.getSchemas();
if (StringUtil.isEmpty(schemas))
schemas = "all";
log.info(_loc.get("sch-reflect", schemas));
gen.generateSchemas();
// record the schema
log.info(_loc.get("sch-reflect-write"));
SchemaSerializer ser = new XMLSchemaSerializer(conf);
ser.addAll(gen.getSchemaGroup());
ser.serialize(flags.writer, MetaDataSerializer.PRETTY);
return true;
}
if (args.length == 0
&& !ACTION_CREATEDB.equals(flags.action)
&& !ACTION_DROPDB.equals(flags.action)
&& !ACTION_EXPORT.equals(flags.action)
&& !ACTION_DELETE_TABLE_CONTENTS.equals(flags.action))
return false;
// parse in the arguments
SchemaParser parser = new XMLSchemaParser(conf);
parser.setDelayConstraintResolve(true);
File file;
for (String arg : args) {
file = Files.getFile(arg, loader);
log.info(_loc.get("tool-running", file));
parser.parse(file);
}
parser.resolveConstraints();
if (ACTION_IMPORT.equals(flags.action)) {
log.info(_loc.get("tool-import-store"));
SchemaGroup schema = parser.getSchemaGroup();
conf.getSchemaFactoryInstance().storeSchema(schema);
return true;
}
if (ACTION_EXPORT.equals(flags.action)) {
if (flags.writer == null)
flags.writer = new PrintWriter(System.out);
log.info(_loc.get("tool-export-gen"));
SchemaGroup schema = conf.getSchemaFactoryInstance().readSchema();
log.info(_loc.get("tool-export-write"));
SchemaSerializer ser = new XMLSchemaSerializer(conf);
ser.addAll(schema);
ser.serialize(flags.writer, MetaDataSerializer.PRETTY);
return true;
}
SchemaTool tool = new SchemaTool(conf, flags.action);
tool.setIgnoreErrors(flags.ignoreErrors);
tool.setDropTables(flags.dropTables);
tool.setSequences(flags.sequences); // set before dropseqs
tool.setDropSequences(flags.dropSequences);
tool.setRollbackBeforeDDL(flags.rollbackBeforeDDL);
tool.setPrimaryKeys(flags.primaryKeys);
tool.setForeignKeys(flags.foreignKeys);
tool.setIndexes(flags.indexes);
tool.setOpenJPATables(flags.openjpaTables);
if (args.length > 0)
tool.setSchemaGroup(parser.getSchemaGroup());
if (flags.writer != null)
tool.setWriter(flags.writer);
log.info(_loc.get("tool-action", flags.action));
try {
tool.run();
} finally {
if (flags.record) {
log.info(_loc.get("tool-record"));
tool.record();
}
}
if (flags.writer != null)
flags.writer.flush();
return true;
}
private void closeConnection(Connection conn) throws SQLException {
if (conn != null && !conn.isClosed()) {
conn.close();
}
}
/**
* Run flags.
*/
public static class Flags {
public String action = ACTION_ADD;
public Writer writer = null;
public boolean dropTables = true;
public boolean rollbackBeforeDDL = true;
public boolean dropSequences = true;
public boolean ignoreErrors = false;
public boolean openjpaTables = false;
public boolean primaryKeys = true;
public boolean foreignKeys = true;
public boolean indexes = true;
public boolean sequences = true;
public boolean record = true;
}
}