
fr.dyade.aaa.util.DBTransaction

/*
 * Copyright (C) 2006 - 2024 ScalAgent Distributed Technologies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or any later version.
 * 
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA.
 *
 * Initial developer(s): ScalAgent Distributed Technologies
 * Contributor(s): 
 */
package fr.dyade.aaa.util;

import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLIntegrityConstraintViolationException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Enumeration;
import java.util.Map;
import java.util.Hashtable;

import org.objectweb.util.monolog.api.BasicLevel;

import fr.dyade.aaa.agent.AgentServer;
import fr.dyade.aaa.common.Pool;
import fr.dyade.aaa.util.backup.BackupFile;
import fr.dyade.aaa.util.backup.BackupRecord;

/**
 *  The DBTransaction class implements atomic storage through
 * a JDBC interface. This class is designed to be specialized for different
 * database implementations.
 * <p>
 * Be careful, the configuration properties don't work for the transaction component:
 * these properties are saved in the transaction repository so they cannot be used to
 * configure it.
 *
 * @see Transaction
 * @see MySQLDBTransaction
 * @see DerbyDBTransaction
 */
public abstract class DBTransaction extends AbstractTransaction implements DBTransactionMBean {
  public static final String DB_TRANSACTION_PREFIX = "org.ow2.joram.dbtransaction";
  public static final String TABLE_NAME_PROP = DB_TRANSACTION_PREFIX + ".dbtable";
  public static final String DFLT_TABLE_PREFIX = "JoramDB";

  protected String dbtable;

  @Override
  public String getDBTableName() {
    return dbtable;
  }

  /**
   * Number of pooled operations, by default 1000.
   * This value can be adjusted for a particular server by setting the
   * DBLogThresholdOperation specific property.
   * <p>
   * This property can be set only on the Java launch command line, or through
   * the System.setProperty method.
   */
  static int logThresholdOperation = 1000;

  /**
   * Returns the pool size for operation objects, by default 1000.
   *
   * @return The pool size for operation objects.
   */
  @Override
  public int getLogThresholdOperation() {
    return logThresholdOperation;
  }

  protected Connection conn = null;

  protected String dbinsert = null;
  protected String dbupdate = null;
  protected String dbload = null;
  protected String dbdelete = null;
  protected String dbclose = null;

  private PreparedStatement insertStmt = null;
  private PreparedStatement updateStmt = null;
  private PreparedStatement loadStmt = null;
  private PreparedStatement deleteStmt = null;

  @Override
  public String getDBInsertStatement() {
    return dbinsert;
  }

  @Override
  public String getDBUpdateStatement() {
    return dbupdate;
  }

  @Override
  public String getDBLoadStatement() {
    return dbload;
  }

  @Override
  public String getDBDeleteStatement() {
    return dbdelete;
  }

  @Override
  public String getDBCloseStatement() {
    return dbclose;
  }

  protected DBTransaction() {}

  public void initRepository() throws IOException {
    dbtable = AgentServer.getProperty(TABLE_NAME_PROP, DFLT_TABLE_PREFIX + AgentServer.getServerId());
    initDB();
    createPreparedStatement();
  }

  private void createPreparedStatement() throws IOException {
    try {
      if (dbinsert == null)
        insertStmt = conn.prepareStatement("INSERT INTO " + dbtable + " VALUES (?, ?)");
      else
        insertStmt = conn.prepareStatement(dbinsert);

      if (dbupdate == null)
        updateStmt = conn.prepareStatement("UPDATE " + dbtable + " SET content=? WHERE name=?");
      else
        updateStmt = conn.prepareStatement(dbupdate);

      if (dbload == null)
        loadStmt = conn.prepareStatement("SELECT content FROM " + dbtable + " WHERE name=?");
      else
        loadStmt = conn.prepareStatement(dbload);

      if (dbdelete == null)
        deleteStmt = conn.prepareStatement("DELETE FROM " + dbtable + " WHERE name=?");
      else
        deleteStmt = conn.prepareStatement(dbdelete);
    } catch (SQLException sqle) {
      if (logmon.isLoggable(BasicLevel.DEBUG))
        logmon.log(BasicLevel.DEBUG, "Cannot create statements", sqle);
      throw new IOException(sqle.getMessage());
    }
  }

  /**
   * Instantiates the database driver and creates the table if necessary.
   *
   * @throws IOException an error occurs.
   */
  protected abstract void initDB() throws IOException;

  /**
   * Returns the path of the persistence directory.
   *
   * @return The path of the persistence directory.
   */
  public String getPersistenceDir() {
    return dir.getPath();
  }

  protected final void setPhase(int newPhase) {
    phase = newPhase;
  }

  /**
   * Number of commit operations since starting up.
   */
  private int commitCount = 0;

  /**
   * Returns the number of commit operations since last boot.
   *
   * @return The number of commit operations.
   */
  @Override
  public final int getCommitCount() {
    return commitCount;
  }

  /**
   * Number of bytes written in commit.
   */
  private long commitBytes = 0L;

  /**
   * Returns the number of bytes written to the database since last boot.
   *
   * @return The number of bytes written to the database.
   */
  @Override
  public final long getCommitBytes() {
    return commitBytes;
  }

  /**
   * Returns an array of strings naming the persistent objects denoted by
   * a name that satisfies the specified prefix. Each string is an object name.
   *
   * @param prefix the prefix
   * @return An array of strings naming the persistent objects
   *         denoted by a name that satisfies the specified prefix. The
   *         array will be empty if no names match.
   */
  public final synchronized String[] getList(String prefix) {
    try (Statement s = conn.createStatement()) {
      // Creating a statement lets us issue commands against the connection.
      ResultSet rs = s.executeQuery("SELECT name FROM " + dbtable + " WHERE name LIKE '" + prefix + "%'");
      ArrayList<String> v = new ArrayList<>();
      while (rs.next()) {
        v.add(rs.getString(1));
      }
      rs.close();

      String[] result = new String[v.size()];
      result = v.toArray(result);

      if (logmon.isLoggable(BasicLevel.DEBUG))
        logmon.log(BasicLevel.DEBUG, "DBTransaction, getList: " + v);

      return result;
    } catch (SQLException sqle) {
      logmon.log(BasicLevel.DEBUG, "DBTransaction.getList()", sqle);
    }
    return null;
  }

  public byte[] getFromLog(String name) throws IOException {
    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, getFromLog(" + name + ")");

    // Searches in the log a new value for the object.
    Hashtable<String, DBOperation> log = perThreadContext.get().getLog();
    DBOperation op = log.get(name);
    if ((op != null) && ((op.type == DBOperation.SAVE) || (op.type == DBOperation.CREATE))) {
      return op.value;
    }
    return null;
  }

  /**
   * Returns true if this Transaction implementation implements an optimized loadAll method.
   *
   * @return true.
   */
  @Override
  public boolean useLoadAll() {
    return true;
  }

  /**
   * Fills the map with all objects of the component whose name begins with the prefix.
   *
   * @param prefix The prefix of searched objects.
   * @param map    The map of corresponding objects.
   */
  @Override
  public void loadAll(String prefix, Map map) {
    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, loadAll(" + prefix + ")");

    try (Statement s = conn.createStatement()) {
      // Creating a statement lets us issue commands against the connection.
      ResultSet rs = s.executeQuery("SELECT name, content FROM " + dbtable
          + " WHERE ((name LIKE '" + prefix + "%') AND (name NOT LIKE '%B'))");
      while (rs.next()) {
        String name = rs.getString(1);

        // Try to retrieve content from the memory log.
        byte[] content = null;
        try {
          content = getFromLog(name);
        } catch (IOException exc) {
          if (logmon.isLoggable(BasicLevel.DEBUG))
            logmon.log(BasicLevel.WARN, "DBTransaction, loadAll: cannot retrieve content for " + name, exc);
          else
            logmon.log(BasicLevel.WARN, "DBTransaction, loadAll: cannot retrieve content for " + name + " - " + exc.getMessage());
        }
        // If content is not present in the memory log get it from the DB.
        if (content == null)
          content = rs.getBytes(2);
        if (content == null) {
          logmon.log(BasicLevel.WARN, "DBTransaction, loadAll: no content for " + name);
          continue;
        }

        // Deserializes the object and adds it to the map.
        try {
          map.put(name, loadFromByteArray(content));
        } catch (ClassNotFoundException | IOException exc) {
          if (logmon.isLoggable(BasicLevel.DEBUG))
            logmon.log(BasicLevel.WARN, "DBTransaction, loadAll: cannot retrieve content for " + name, exc);
          else
            logmon.log(BasicLevel.WARN, "DBTransaction, loadAll: cannot retrieve content for " + name + " - " + exc.getMessage());
        }
      }
      rs.close();

      if (logmon.isLoggable(BasicLevel.DEBUG))
        logmon.log(BasicLevel.DEBUG, "DBTransaction, loadAll: " + map.size());

      return;
    } catch (SQLException sqle) {
      logmon.log(BasicLevel.DEBUG, "DBTransaction.loadAll()", sqle);
    }
    return;
  }

  final String fname(String dirName, String name) {
    if (dirName == null)
      return name;
    return new StringBuffer(dirName).append('/').append(name).toString();
  }

  protected final void saveInLog(byte[] buf,
                                 String dirName, String name,
                                 Hashtable log,
                                 boolean copy,
                                 boolean first) throws IOException {
    String fname = fname(dirName, name);

    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, saveInLog(" + fname + ", " + copy + ", " + first + ")");

    DBOperation op = null;
    if (first)
      op = DBOperation.alloc(Operation.CREATE, fname, buf);
    else
      op = DBOperation.alloc(Operation.SAVE, fname, buf);
    DBOperation old = (DBOperation) log.put(fname, op);
    // Bug fix: JMQ-215
    if (first && (old != null) && (old.type == Operation.DELETE))
      op.type = Operation.SAVE;
    if (copy) {
      if ((old != null) && (old.type == DBOperation.SAVE) && (old.value.length == buf.length)) {
        // reuse old buffer
        op.value = old.value;
      } else {
        // alloc a new one
        op.value = new byte[buf.length];
      }
      System.arraycopy(buf, 0, op.value, 0, buf.length);
    }
    if (old != null) old.free();
  }

  public byte[] loadByteArray(String dirName, String name) throws IOException {
    String fname = fname(dirName, name);

    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, loadByteArray(" + fname + ")");

    // Searches in the log a new value for the object.
    Hashtable<String, DBOperation> log = perThreadContext.get().getLog();
    DBOperation op = log.get(fname);
    if (op != null) {
      if ((op.type == DBOperation.SAVE) || (op.type == DBOperation.CREATE)) {
        return op.value;
      } else if (op.type == DBOperation.DELETE) {
        // TODO (AF): Operation.NOOP?
        // The object was deleted.
        return null;
      }
    }

    try {
      loadStmt.setString(1, fname);
      ResultSet rs = loadStmt.executeQuery();
      if (!rs.next())
        return null;
      byte[] content = rs.getBytes(1);
      rs.close();
      return content;
    } catch (SQLException sqle) {
      throw new IOException(sqle.getMessage());
    }
  }

  public void delete(String dirName, String name) {
    String fname = fname(dirName, name);

    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, delete(" + fname + ")");

    Hashtable<String, DBOperation> log = perThreadContext.get().getLog();
    DBOperation op = DBOperation.alloc(DBOperation.DELETE, fname);
    DBOperation old = log.put(fname, op);
    if (old != null) {
      if (old.type == Operation.CREATE) op.type = Operation.NOOP;
      old.free();
    }
  }

  /**
   * This method tries to reconnect to the database; it is used in case of failure during commit.
   * Be careful, unless a connection pool is used, we need to recreate the prepared statements.
   *
   * @throws IOException The reconnection has failed.
   */
  protected abstract void connectDB() throws IOException;

  protected final int JDBC_CONNECT_RETRY_COUNT_DFLT = 5;
  protected int connectRetryCount = JDBC_CONNECT_RETRY_COUNT_DFLT;

  protected final long JDBC_CONNECT_RETRY_MAX_PERIOD_DFLT = 60000L;
  protected long connectRetryMaxPeriod = JDBC_CONNECT_RETRY_MAX_PERIOD_DFLT;

  protected final long JDBC_CONNECT_RETRY_MIN_DELAY_DFLT = 1000L;
  protected long connectRetryMinDelay = JDBC_CONNECT_RETRY_MIN_DELAY_DFLT;

  public final synchronized void commit(boolean release) throws IOException {
    if (phase != RUN)
      throw new IllegalStateException("Can not commit.");

    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, commit");

    Hashtable<String, DBOperation> log = perThreadContext.get().getLog();
    if (! log.isEmpty()) {
      boolean completed = false;
      int retry = 0;
      long startRetry = 0L;
      long lastTry = 0L;
      SQLException lastexc = null;
      do {
        try {
          // TODO (AF): The dbLogCommit is currently done after each connection attempt, even if the
          // connection fails. We should set a boolean in the catch clause of connectDB and execute the
          // dbLogCommit only if the connection is successful.
          dbLogCommit(log);
          lastexc = null;
          completed = true;
        } catch (SQLException sqle) {
          // TODO (AF): It is assumed that the error is due to a loss of connection with the database, so the
          // corrective action is to reestablish the connection and replay the transaction.
          // If the error is linked to the actions carried out during the transaction there is no solution,
          // however the problem could come from the database itself (saturation of the space for example,
          // transient or permanent), in this case the reconnection mechanism is not useful, or at least it
          // would be necessary to close it first.
          lastexc = sqle;
          retry += 1;
          long time = System.currentTimeMillis();
          if (startRetry == 0L) startRetry = time;

          if ((connectRetryCount > 0) && (retry > connectRetryCount)) {
            logmon.log(BasicLevel.WARN,
                       "DBTransaction, commit: maximum number of reconnection attempts reached: " + connectRetryCount);
            break;
          }
          if ((connectRetryMaxPeriod > 0L) && ((time - startRetry) > connectRetryMaxPeriod)) {
            logmon.log(BasicLevel.WARN,
                       "DBTransaction, commit: reconnection period exceeded: " + connectRetryMaxPeriod);
            break;
          }

          try {
            // Do not wait the 1st time.
            if (lastTry != 0L) {
              // Wait for at least the configured delay since the last attempt to connect.
              long delay = connectRetryMinDelay - (time - lastTry);
              try {
                if (delay > 0) Thread.sleep(delay);
              } catch (InterruptedException exc) {}
            }

            if (logmon.isLoggable(BasicLevel.DEBUG))
              logmon.log(BasicLevel.WARN, "DBTransaction, commit: try to reconnect", sqle);
            else
              logmon.log(BasicLevel.WARN, "DBTransaction, commit: try to reconnect - " + sqle.getMessage());

            lastTry = System.currentTimeMillis();
            connectDB();
            createPreparedStatement();
            logmon.log(BasicLevel.DEBUG, "DBTransaction, commit: DB connected");
          } catch (IOException exc) {
            logmon.log(BasicLevel.INFO, "DBTransaction, commit: cannot reconnect - " + exc.getMessage());
          }
        }
      } while (!completed);

      if (lastexc != null)
        throw new IOException(lastexc.getMessage());

      if (startRetry != 0) {
        // There was a reconnection process in progress, logs a message to signify its end.
        logmon.log(BasicLevel.WARN, "DBTransaction, commit: reconnected");
      }

      // All operations are committed in the database, frees them and clears the log.
      log.forEach((k, op) -> {
        op.free();
      });
      log.clear();
    }

    if (logmon.isLoggable(BasicLevel.DEBUG))
      logmon.log(BasicLevel.DEBUG, "DBTransaction, committed");

    if (release) {
      // Change the transaction state and save it.
      setPhase(FREE);
      notify();
    } else {
      setPhase(COMMIT);
    }
  }

  /**
   * Executes all SQL statements corresponding to log operations, then the commit.
   *
   * @param log Hashtable containing all operations to commit.
   * @throws SQLException an error occurs.
   */
  private void dbLogCommit(Hashtable<String, DBOperation> log) throws SQLException {
    try {
      dbLogExecute(log);
    } catch (SQLException sqle) {
      conn.rollback();
      throw sqle;
    }
    try {
      conn.commit();
      commitCount += 1;
    } catch (SQLException sqle) {
      throw sqle;
    }
  }

  int nbinserts = 0;
  int badinserts = 0;

  /**
   * Returns the number of inserts in database since last boot.
   *
   * @return The number of inserts in database.
   */
  @Override
  public final int getNbInserts() {
    return nbinserts;
  }

  /**
   * Returns the number of bad inserts in database since last boot (record already existing).
   *
   * @return The number of bad inserts in database.
   */
  @Override
  public final int getBadInserts() {
    return badinserts;
  }

  int nbupdates = 0;
  int badupdates = 0;

  /**
   * Returns the number of updates in database since last boot.
   *
   * @return The number of updates in database.
   */
  @Override
  public final int getNbUpdates() {
    return nbupdates;
  }

  /**
   * Returns the number of bad updates in database since last boot (record needing to be inserted).
   *
   * @return The number of bad updates in database.
   */
  @Override
  public int getBadUpdates() {
    return badupdates;
  }

  int nbdeletes = 0;
  int nbnoop;

  /**
   * Returns the number of deletes in database since last boot.
   *
   * @return The number of deletes in database.
   */
  @Override
  public final int getNbDeletes() {
    return nbdeletes;
  }

  /**
   * Executes all SQL statements corresponding to log operations.
   *
   * @param log Hashtable containing all operations to commit.
   * @throws SQLException an error occurs.
   */
  private void dbLogExecute(Hashtable<String, DBOperation> log) throws SQLException {
    DBOperation op = null;
    for (Enumeration<DBOperation> e = log.elements(); e.hasMoreElements(); ) {
      op = e.nextElement();

      if (op.type == DBOperation.CREATE) {
        // TODO (AF): Should handle SAVE (update then insert) and CREATE (insert then update) differently.
        commitBytes += op.value.length;

        if (logmon.isLoggable(BasicLevel.DEBUG))
          logmon.log(BasicLevel.DEBUG,
                     "DBTransaction, dbLogCommit.create(" + op.name + ", " + op.value.length + ") -> " + commitBytes);

        try {
          nbinserts ++;
          insertStmt.setString(1, op.name);
          insertStmt.setBytes(2, op.value);
          insertStmt.executeUpdate();
        } catch (SQLException sqle1) {
          if (! (sqle1 instanceof SQLIntegrityConstraintViolationException)) {
            logmon.log(BasicLevel.WARN, "DBTransaction, dbLogCommit.create(" + op.name + ") -> ", sqle1);
            // TODO (AF): Should throw the exception!!
          }
          badinserts ++;
          logmon.log(BasicLevel.INFO, "DBTransaction, dbLogCommit.create(" + op.name + ") -> needs update");
          try {
            nbupdates ++;
            updateStmt.setBytes(1, op.value);
            updateStmt.setString(2, op.name);
            updateStmt.executeUpdate();
          } catch (SQLException sqle) {
            throw sqle;
          }
        }
      } else if (op.type == DBOperation.SAVE) {
        // TODO (AF): Should handle SAVE (update then insert) and CREATE (insert then update) differently.
        commitBytes += op.value.length;

        if (logmon.isLoggable(BasicLevel.DEBUG))
          logmon.log(BasicLevel.DEBUG,
                     "DBTransaction, dbLogCommit.save(" + op.name + ", " + op.value.length + ") -> " + commitBytes);

        int rows = -1;
        try {
          nbupdates ++;
          updateStmt.setBytes(1, op.value);
          updateStmt.setString(2, op.name);
          rows = updateStmt.executeUpdate();
        } catch (SQLException sqle) {
          throw sqle;
        }

        if (rows != 1) {
          nbinserts ++;
          logmon.log(BasicLevel.INFO,
                     "DBTransaction, dbLogCommit.save(" + op.name + ") needs insert -> " + rows);
          badupdates ++;
          try {
            insertStmt.setString(1, op.name);
            insertStmt.setBytes(2, op.value);
            insertStmt.executeUpdate();
          } catch (SQLException sqle1) {
            // Note (AF): SQLIntegrityConstraintViolationException should never happen.
            if (! (sqle1 instanceof SQLIntegrityConstraintViolationException)) {
              logmon.log(BasicLevel.WARN, "DBTransaction, dbLogCommit.save(" + op.name + ") -> ", sqle1);
              // TODO (AF): Should throw the exception!!
            }
          }
        }
      } else if (op.type == DBOperation.DELETE) {
        if (logmon.isLoggable(BasicLevel.DEBUG))
          logmon.log(BasicLevel.DEBUG, "DBTransaction, dbLogCommit.delete(" + op.name + ')');

        try {
          nbdeletes ++;
          deleteStmt.setString(1, op.name);
          deleteStmt.executeUpdate();
        } catch (SQLException sqle) {
          throw sqle;
        }
      } else if (op.type == DBOperation.NOOP) {
        nbnoop ++;
        // TODO (AF): To remove!!
        logmon.log(BasicLevel.DEBUG, "DBTransaction, dbLogCommit.noop(" + op.name + ')');
      }
    }
  }

  /**
   * Stops the transaction module.
   * It waits for all transactions to terminate, then the module is kept
   * in a FREE 'ready to use' state.
   */
  public synchronized void stop() {
    if (logmon.isLoggable(BasicLevel.INFO))
      logmon.log(BasicLevel.INFO, "DBTransaction, stops");

    while (phase != FREE) {
      // Wait for the transaction subsystem to be free.
      try {
        wait();
      } catch (InterruptedException exc) {
      }
    }
    setPhase(FINALIZE);

    Statement s = null;
    try {
      // TODO (AF): We do not execute the close statement if we wish to restart the module
      // without a DBInit.
      if ((dbclose != null) && (dbclose.length() > 0)) {
        // Creating a statement lets us issue commands against the connection.
        s = conn.createStatement();
        s.executeUpdate(dbclose);
      }
    } catch (SQLException sqle) {
      logmon.log(BasicLevel.ERROR, "DBTransaction.stop()", sqle);
    } catch (Throwable t) {
      logmon.log(BasicLevel.ERROR, "DBTransaction.stop()", t);
    } finally {
      try {
        if (s != null) s.close();
      } catch (SQLException sqle) {
        logmon.log(BasicLevel.ERROR, "DBTransaction.stop()", sqle);
      }
      logmon.log(BasicLevel.INFO, "DBTransaction.stop(), stopped");
    }
    setPhase(FREE);

    if (logmon.isLoggable(BasicLevel.INFO)) {
      logmon.log(BasicLevel.INFO,
                 "DBTransaction, stopped: " + commitCount + '(' + commitBytes + "), "
                 + nbinserts + '(' + badinserts + "), "
                 + nbupdates + '(' + badupdates + "), "
                 + nbdeletes + '(' + nbnoop + ')');
    }
  }

  /**
   * Closes the transaction module.
   * It waits for all transactions to terminate; the module will have to be initialized
   * anew before reusing it.
   */
  public synchronized void close() {
    if (logmon.isLoggable(BasicLevel.INFO))
      logmon.log(BasicLevel.INFO, "DBTransaction, close");

    if (phase == INIT) return;

    while (phase != FREE) {
      // Wait for the transaction subsystem to be free.
      try {
        wait();
      } catch (InterruptedException exc) {
      }
    }
    setPhase(FINALIZE);

    Statement s = null;
    try {
      insertStmt.close();
      updateStmt.close();
      loadStmt.close();
      deleteStmt.close();

      if ((dbclose != null) && (dbclose.length() > 0)) {
        // Creating a statement lets us issue commands against the connection.
        s = conn.createStatement();
        s.executeUpdate(dbclose);
      }
      conn.commit();
    } catch (SQLException sqle) {
      logmon.log(BasicLevel.ERROR, "DBTransaction.close()", sqle);
    } catch (Throwable t) {
      logmon.log(BasicLevel.ERROR, "DBTransaction.close()", t);
    } finally {
      try {
        if (s != null) s.close();
      } catch (SQLException sqle) {
        logmon.log(BasicLevel.ERROR, "DBTransaction.close()", sqle);
      }
      try {
        conn.close();
      } catch (SQLException sqle) {
        logmon.log(BasicLevel.ERROR, "DBTransaction.close(), closing driver", sqle);
      }
      logmon.log(BasicLevel.INFO, "DBTransaction.close(), stopped");
    }
    setPhase(INIT);

    if (logmon.isLoggable(BasicLevel.INFO)) {
      logmon.log(BasicLevel.INFO, "DBTransaction, closed");
    }
  }

  @Override
  public String dumpProperties() {
    StringBuilder strbuf = new StringBuilder();
    strbuf.append('[');
    dumpProperties(strbuf);
    strbuf.append(']');
    return strbuf.toString();
  }

  protected void dumpProperties(StringBuilder strbuf) {
    strbuf.append('(').append("dbtable=").append(dbtable);
  }

  /**
   * Backs up the content of the Transaction module.
   * Produces a generic Backup/Restore file containing all living objects.
   *
   * @param path Directory path to store the backup.
   * @return the name of the created backup file.
   * @throws Exception An error occurs during backup.
   */
  @Override
  public synchronized String backup(String path) throws Exception {
    logmon.log(BasicLevel.DEBUG, "DBTransaction, backup");

    if ((path == null) || path.isEmpty())
      path = ".";

    BackupFile backupFile = null;
    try (Statement s = conn.createStatement()) {
      // Creating a statement lets us issue commands against the connection.
      long start = System.currentTimeMillis();

      SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd-HHmmss");
      String backupName = "backup-" + format.format(new Date());

      // Creates the BackupFile and temporary directory.
      File backupTempFile = new File(path, backupName + ".temp");
      backupFile = new BackupFile(backupTempFile);
      logmon.log(BasicLevel.INFO, "DBTransaction, backup: " + path);

      ResultSet rs = s.executeQuery("SELECT name, content FROM " + dbtable);
      while (rs.next()) {
        String name = rs.getString(1);
        byte[] content = rs.getBytes(2);

        int idx = name.lastIndexOf('/');
        String dirName = null;
        if (idx != -1) {
          dirName = name.substring(0, idx);
          name = name.substring(idx + 1);
        }
        backupFile.backup(new BackupRecord(dirName, name, content));
      }
      backupFile.close();
      rs.close();

      File resultFile = new File(path, backupName + ".tbck");
      if (!backupTempFile.renameTo(resultFile)) {
        logmon.log(BasicLevel.WARN,
                   "DBTransaction, backup phase2: Cannot rename " + backupTempFile.getCanonicalPath());
        return backupTempFile.getName();
      }

      long end = System.currentTimeMillis();
      logmon.log(BasicLevel.INFO, "DBTransaction, backup end: " + (end - start));

      return resultFile.getName();
    } catch (Exception exc) {
      logmon.log(BasicLevel.ERROR, "DBTransaction, backup error", exc);
      if (backupFile != null)
        try { backupFile.close(); } catch (IOException e) {}
      throw exc;
    }
  }
}

final class DBOperation implements Serializable {
  /** define serialVersionUID for interoperability */
  private static final long serialVersionUID = 1L;

  static final int SAVE = 1;
  static final int CREATE = 4;
  static final int DELETE = 2;
  static final int NOOP = 5; // Create then delete
  static final int COMMIT = 3;
  static final int END = 127;

  int type;
  String name;
  byte[] value;

  private DBOperation(int type, String name, byte[] value) {
    this.type = type;
    this.name = name;
    this.value = value;
  }

  /**
   * Returns a string representation for this object.
   *
   * @return A string representation of this object.
   */
  public String toString() {
    StringBuffer strbuf = new StringBuffer();
    strbuf.append('(').append(super.toString());
    strbuf.append(",type=").append(type);
    strbuf.append(",name=").append(name);
    strbuf.append(')');
    return strbuf.toString();
  }

  private static Pool pool = null;

  static {
    pool = new Pool("DBTransaction$Operation",
                    Integer.getInteger("DBLogThresholdOperation", DBTransaction.logThresholdOperation).intValue());
  }

  static DBOperation alloc(int type, String name) {
    return alloc(type, name, null);
  }

  static DBOperation alloc(int type, String name, byte[] value) {
    DBOperation op = null;
    try {
      op = (DBOperation) pool.allocElement();
    } catch (Exception exc) {
      return new DBOperation(type, name, value);
    }
    op.type = type;
    op.name = name;
    op.value = value;
    return op;
  }

  void free() {
    /* to let gc do its work */
    name = null;
    value = null;
    pool.freeElement(this);
  }
}
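
The commit path above reduces to a simple upsert per logged operation: a SAVE tries UPDATE first and falls back to INSERT when no row matched, a CREATE does the reverse, and a single JDBC commit then makes the whole log durable. The following standalone sketch illustrates that pattern for readers unfamiliar with it; the H2 URL, the JoramDB0 table name and the VARCHAR/BLOB column types are assumptions here, since the actual DDL is defined by the concrete subclasses in initDB() (see MySQLDBTransaction and DerbyDBTransaction).

// Illustration only: a minimal sketch of the update-then-insert pattern used by
// dbLogExecute for SAVE operations. Table name, column types and the JDBC URL are
// assumptions, not the schema created by the real subclasses.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class UpsertSketch {
  public static void main(String[] args) throws SQLException {
    // Hypothetical in-memory database; any JDBC driver on the classpath would do.
    try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:joramdb")) {
      conn.setAutoCommit(false);
      try (PreparedStatement ddl = conn.prepareStatement(
          "CREATE TABLE JoramDB0 (name VARCHAR(255) PRIMARY KEY, content BLOB)")) {
        ddl.executeUpdate();
      }

      byte[] value = new byte[] {1, 2, 3};
      try (PreparedStatement update = conn.prepareStatement(
               "UPDATE JoramDB0 SET content=? WHERE name=?");
           PreparedStatement insert = conn.prepareStatement(
               "INSERT INTO JoramDB0 VALUES (?, ?)")) {
        // SAVE: try the update first, fall back to insert when no row was touched.
        update.setBytes(1, value);
        update.setString(2, "AgentServer#0");
        if (update.executeUpdate() != 1) {
          insert.setString(1, "AgentServer#0");
          insert.setBytes(2, value);
          insert.executeUpdate();
        }
      }
      // Like DBTransaction.commit(), make all logged operations durable in one commit.
      conn.commit();
    }
  }
}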




