// /////////////////////////////////////////////////////////////////////////////
// REFCODES.ORG
// =============================================================================
// This code is copyright (c) by Siegfried Steiner, Munich, Germany, distributed
// on an "AS IS" BASIS WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, and licen-
// sed under the following (see "http://en.wikipedia.org/wiki/Multi-licensing")
// licenses:
// =============================================================================
// GNU General Public License, v3.0 ("http://www.gnu.org/licenses/gpl-3.0.html")
// together with the GPL linking exception applied; as being applied by the GNU
// Classpath ("http://www.gnu.org/software/classpath/license.html")
// =============================================================================
// Apache License, v2.0 ("http://www.apache.org/licenses/LICENSE-2.0")
// =============================================================================
// Please contact the copyright holding author(s) of the software artifacts in
// question for licensing issues not being covered by the above listed licenses,
// also regarding commercial licensing models or regarding the compatibility
// with other open source licenses.
// /////////////////////////////////////////////////////////////////////////////
package org.refcodes.logger.alt.simpledb;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.logging.Level;
import org.refcodes.component.Component;
import org.refcodes.component.Decomposable;
import org.refcodes.component.Flushable;
import org.refcodes.component.Initializable;
import org.refcodes.component.InitializeException;
import org.refcodes.controlflow.RetryCounter;
import org.refcodes.data.IoRetryCount;
import org.refcodes.data.LatencySleepTime;
import org.refcodes.data.LoopExtensionTime;
import org.refcodes.data.ScheduleSleepTime;
import org.refcodes.exception.Trap;
import org.refcodes.generator.UniqueIdGeneratorSingleton;
import org.refcodes.logger.IllegalRecordRuntimeException;
import org.refcodes.logger.Logger;
import org.refcodes.logger.UnexpectedLogRuntimeException;
import org.refcodes.tabular.Column;
import org.refcodes.tabular.ColumnFactory;
import org.refcodes.tabular.ColumnMismatchException;
import org.refcodes.tabular.Header;
import org.refcodes.tabular.HeaderImpl;
import org.refcodes.tabular.HeaderMismatchException;
import org.refcodes.tabular.Records;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.simpledb.model.BatchPutAttributesRequest;
import com.amazonaws.services.simpledb.model.CreateDomainRequest;
import com.amazonaws.services.simpledb.model.DeleteDomainRequest;
import com.amazonaws.services.simpledb.model.DomainMetadataRequest;
import com.amazonaws.services.simpledb.model.NoSuchDomainException;
import com.amazonaws.services.simpledb.model.ReplaceableAttribute;
import com.amazonaws.services.simpledb.model.ReplaceableItem;
/**
* The {@link SimpleDbLogger} is the Amazon SimpleDB implementation of the
* {@link Logger} interface. As Amazon SimpleDB stores only {@link String}
* values (everything is a {@link String}), type information may be lost, as
* inferring it from the {@link String} content may not be possible. The
* {@link Column#toStorageString(Object)} and
* {@link Column#fromStorageString(String)} methods are used to convert the
* {@link Record} instances' values accordingly.
*
* ATTENTION: Field values exceeding a size of 1024 characters are truncated
* and an error message is logged. This implementation of the {@link Logger}
* assumes that no field is longer than 1024 characters.
*
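* <p>
* A minimal usage sketch (the domain name, the credentials, the provided
* {@link ColumnFactory} and the logged {@link org.refcodes.tabular.Record}
* are illustrative placeholders; exception handling is omitted):
* </p>
*
* <pre>
* SimpleDbLogger&lt;Object&gt; theLogger = new SimpleDbLogger&lt;&gt;( "myDomain", "myAccessKey", "mySecretKey", aColumnFactory );
* theLogger.initialize(); // Creates the SimpleDB domain in case it does not exist yet
* theLogger.log( aRecord ); // Buffers the record; records are flushed in batches
* theLogger.flush(); // Forces the buffer to be written to SimpleDB
* theLogger.destroy(); // Flushes the buffer and cancels the buffer timer
* </pre>
*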
* @param <T> The type of the {@link Record} instances managed by the
* {@link Logger}.
*/
public class SimpleDbLogger<T> extends AbstractSimpleDbClient implements Logger<T>, Component, Initializable, Decomposable, Flushable {
// /////////////////////////////////////////////////////////////////////////
// STATICS:
// /////////////////////////////////////////////////////////////////////////
private static final java.util.logging.Logger LOGGER = java.util.logging.Logger.getLogger( SimpleDbLogger.class.getName() );
// /////////////////////////////////////////////////////////////////////////
// CONSTANTS:
// /////////////////////////////////////////////////////////////////////////
private static final int BUFFER_WRITE_SIZE = 23; // 24 seems to be the maximum batch size accepted in practice (SimpleDB documents a limit of 25 items per BatchPutAttributes request), so we stay below it.
private static final int FIELD_MAX_SIZE = 1024;
// /////////////////////////////////////////////////////////////////////////
// VARIABLES:
// /////////////////////////////////////////////////////////////////////////
private String _baseItemName = UniqueIdGeneratorSingleton.getInstance().next();
private int _itemNameCounter = 0;
private final List<ReplaceableItem> _replaceableItemBuffer = new ArrayList<>();
private Timer _bufferTimer;
private ColumnFactory<T> _columnFactory;
private final Header<T> _header = new HeaderImpl<>();
// /////////////////////////////////////////////////////////////////////////
// CONSTRUCTORS:
// /////////////////////////////////////////////////////////////////////////
/**
* Constructs the {@link SimpleDbLogger} for a given SimpleDB domain.
*
* @param aDomainName The name for the Amazon SimpleDB domain
* @param aAccessKey The Amazon access key for Amazon SimpleDB
* @param aSecretKey The Amazon secret key for Amazon SimpleDB
* @param aColumnFactory The {@link ColumnFactory} to create default
* {@link Column} instances for {@link Record} instances to be
* processed
*/
public SimpleDbLogger( String aDomainName, String aAccessKey, String aSecretKey, ColumnFactory<T> aColumnFactory ) {
this( aDomainName, aAccessKey, aSecretKey, null, aColumnFactory );
}
/**
* Constructs the {@link SimpleDbLogger} for a given SimpleDB domain.
*
* @param aDomainName The name for the Amazon SimpleDB domain
* @param aAccessKey The Amazon access key for Amazon SimpleDB
* @param aSecretKey The Amazon secret key for Amazon SimpleDB
* @param aEndPoint The end-point (Amazon region) to use (see
* {@link AbstractSimpleDbClient}'s constructor documentation for
* possible values).
* @param aColumnFactory The {@link ColumnFactory} to create default
* {@link Column} instances for {@link Record} instances to be
* processed
*/
public SimpleDbLogger( String aDomainName, String aAccessKey, String aSecretKey, String aEndPoint, ColumnFactory<T> aColumnFactory ) {
super( aDomainName, aAccessKey, aSecretKey );
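// A daemon timer periodically triggers the BufferDaemon, so buffered records
// get flushed even when the buffer never reaches its write size: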
_bufferTimer = new Timer( true );
_bufferTimer.schedule( new BufferDaemon(), ScheduleSleepTime.NORM_SCHEDULE_SLEEP_TIME_IN_MS.getTimeMillis(), ScheduleSleepTime.NORM_SCHEDULE_SLEEP_TIME_IN_MS.getTimeMillis() );
_columnFactory = aColumnFactory;
}
// /////////////////////////////////////////////////////////////////////////
// METHODS:
// /////////////////////////////////////////////////////////////////////////
/**
* Log a {@link org.refcodes.tabular.Record} instance.
*
* @param aRecord the {@link org.refcodes.tabular.Record} instance to be
* logged.
*
* @throws IllegalRecordRuntimeException thrown in case the record cannot be
* logged as a specific implementation might expect some dedicated
* {@link Column} instances to be contained in the provided Record.
* @throws UnexpectedLogRuntimeException thrown in case some other problems
* regarding logging occurred, e.g. the data sink (physical system
* where to log to) experiences problems.
*/
@Override
public void log( org.refcodes.tabular.Record<? extends T> aRecord ) {
aRecord = (org.refcodes.tabular.Record<? extends T>) aRecord.toPurged();
if ( aRecord.isEmpty() ) {
LOGGER.info( "Ignoring record \"" + aRecord.toString() + "\" to be logged for Amazon SimpleDB domain \"" + getAmazonSimpleDbDomainName() + "\" as it is empty." );
}
else {
LOGGER.info( "Logging record \"" + aRecord.toString() + "\" for Amazon SimpleDB domain \"" + getAmazonSimpleDbDomainName() + "\"." );
// -------------------------------
// Log the fields to the SimpleDB:
// -------------------------------
final ReplaceableItem theReplaceableItem = new ReplaceableItem( nextItemName() );
final List<ReplaceableAttribute> theReplaceableAttributes = new ArrayList<>();
for ( String eKey : aRecord.keySet() ) {
// ------------------------------------
// Add next known column key to header:
// ------------------------------------
addHeaderColumn( eKey );
}
LOGGER.log( Level.FINE, "Processed incoming record to be: \"" + aRecord.toString() + "\"" );
// ------------------------------------------------------------
// Convert record to default PK key and PK value string record:
// ------------------------------------------------------------
final org.refcodes.tabular.Record<?> theRecord;
try {
theRecord = _header.toStorageString( aRecord );
}
catch ( HeaderMismatchException | ColumnMismatchException aException ) {
throw new IllegalRecordRuntimeException( Trap.asMessage( aException ), aRecord, aException );
}
LOGGER.log( Level.FINE, "String processed record to be: \"" + theRecord.toString() + "\"" );
Object eValue;
Object[] eValues;
String eToValue;
for ( String eKey : theRecord.keySet() ) {
// ----------------------------------------------------
// Set up our header for type to string conversion:
// ----------------------------------------------------
addHeaderColumn( eKey );
eValue = theRecord.get( eKey );
if ( eValue != null ) {
// @formatter:off
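// The third ReplaceableAttribute argument (false) tells SimpleDB to add the
// value rather than replace existing ones, so array entries accumulate as a
// multi-valued attribute: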
// Array values:
if ( eValue.getClass().isArray() ) {
eValues = (Object[]) eValue;
for ( Object eValue1 : eValues ) {
eToValue = eValue1.toString();
eToValue = toTruncatedField( eKey, eToValue );
if ( eToValue.length() != 0 ) {
theReplaceableAttributes.add( new ReplaceableAttribute( eKey, eToValue, false ) );
}
}
}
// Plain values:
else {
eToValue = toTruncatedField( eKey, eValue.toString() );
if ( eToValue.length() != 0 ) {
theReplaceableAttributes.add( new ReplaceableAttribute( eKey, eToValue, false ) );
}
}
// @formatter:on
}
}
writeBuffer( theReplaceableItem.withAttributes( theReplaceableAttributes ) );
}
}
// /////////////////////////////////////////////////////////////////////////
// COMPONENT:
// /////////////////////////////////////////////////////////////////////////
/**
* {@inheritDoc}
*/
@Override
public void initialize() throws InitializeException {
LOGGER.info( "Initializing component \"" + getClass().getName() + "\" for domain \"" + getAmazonSimpleDbDomainName() + "\"." );
// ---------------------------------------------------------------------
// Amazon writes: "... CreateDomain is an idempotent operation; running
// it multiple times using the same domain name will not result in an
// error response ...". We query the domain metadata upon initialization
// to test whether the domain already exists and create it only if it
// does not; creating it again would leave an existing domain as is.
// ---------------------------------------------------------------------
try {
final DomainMetadataRequest domainMetadataRequest = new DomainMetadataRequest( getAmazonSimpleDbDomainName() );
getAmazonSimpleDbClient().domainMetadata( domainMetadataRequest );
}
catch ( NoSuchDomainException e ) {
LOGGER.info( "Creating non existing domain \"" + getAmazonSimpleDbDomainName() + "\"..." );
final CreateDomainRequest theCreateDomainRequest = new CreateDomainRequest( getAmazonSimpleDbDomainName() );
getAmazonSimpleDbClient().createDomain( theCreateDomainRequest );
}
}
/**
* {@inheritDoc}
*/
@Override
public synchronized void destroy() {
LOGGER.info( "Destroying component \"" + getClass().getName() + "\" for domain \"" + getAmazonSimpleDbDomainName() + "\"." );
flushBuffer();
if ( _bufferTimer != null ) {
_bufferTimer.cancel();
_bufferTimer = null;
}
}
/**
* {@inheritDoc}
*/
@Override
public void decompose() {
LOGGER.info( "Decomposing (deleting) \"" + getClass().getName() + "\" component for domain \"" + getAmazonSimpleDbDomainName() + "\"..." );
getAmazonSimpleDbClient().deleteDomain( new DeleteDomainRequest( getAmazonSimpleDbDomainName() ) );
}
/**
* {@inheritDoc}
*/
@Override
public void flush() throws IOException {
LOGGER.info( "Flushing \"" + getClass().getName() + "\" component for domain \"" + getAmazonSimpleDbDomainName() + "\"..." );
flushBuffer();
}
// /////////////////////////////////////////////////////////////////////////
// HOOKS:
// /////////////////////////////////////////////////////////////////////////
/**
* Provides access to the {@link Header} member variable required for
* {@link Record} related operations.
*
* @return The {@link Header}.
*/
protected Header getHeader() {
return _header;
}
/**
* Adds a key to the dynamically created {@link Header}. This reduces object
* creation overhead when logging data at high volume, as no new {@link Column}
* instances are created once a key has already been added.
*
* @param aKey The key for which a {@link Column} is to be added.
*/
protected void addHeaderColumn( String aKey ) {
if ( !_header.containsKey( aKey ) ) {
try {
final Column<T> theColumn = _columnFactory.create( aKey );
_header.add( theColumn );
}
catch ( IllegalArgumentException e ) {
LOGGER.log( Level.WARNING, "Cannot add column header for key <" + aKey + "> (probably encountered a race ) as of: " + Trap.asMessage( e ), e );
}
}
}
// /////////////////////////////////////////////////////////////////////////
// HELPER:
// /////////////////////////////////////////////////////////////////////////
/**
* Truncates the given field value to fit into the maximum allowed size for
* Amazon SimpleDB fields.
*
* @param eKey The key of the field to be truncated.
* @param eValue The value to be truncated in case it exceeds the maximum size.
*
* @return The value as is if no truncation was required, else the truncated value.
*/
private String toTruncatedField( String eKey, String eValue ) {
if ( eValue.length() >= FIELD_MAX_SIZE ) {
LOGGER.log( Level.WARNING, "The field with key \"" + eKey + "\" exceeds the max. allowed size of <" + FIELD_MAX_SIZE + "> characters by <\"" + ( eValue.length() - FIELD_MAX_SIZE ) + "\"> characters. Concatenating the value \"" + eValue + "\" to the first <" + FIELD_MAX_SIZE + "> characters! Data loss might occur!" );
eValue = eValue.substring( 0, FIELD_MAX_SIZE );
}
return eValue;
}
/**
* Generates an item name (unique for this machine) for an Amazon SimpleDB
* log-line.
*
* @return The next item name.
*/
private synchronized String nextItemName() {
if ( _itemNameCounter == Integer.MAX_VALUE ) {
// Roll over to a new unique base item name once the counter is exhausted:
_baseItemName = UniqueIdGeneratorSingleton.getInstance().next();
_itemNameCounter = 0;
}
return _baseItemName + "-" /* + System.currentTimeMillis() + "-" */ + _itemNameCounter++;
}
/**
* Flushes the buffer holding the {@link Records} already encapsulated in
* Amazon SimpleDB items.
*/
protected synchronized void flushBuffer() {
// ---------------------------------
// At least one item, we can flush:
// ---------------------------------
if ( _replaceableItemBuffer.size() > 0 ) {
final List<ReplaceableItem> eReplaceableItemBufferFrame = new ArrayList<>();
BatchPutAttributesRequest eBatchPutAttributesRequest;
int theBufferSize;
Exception eLastException;
// ------------------------------------------
// Do we have any items to flush to SimpleDB?
// ------------------------------------------
while ( _replaceableItemBuffer.size() > 0 || eReplaceableItemBufferFrame.size() > 0 ) {
theBufferSize = _replaceableItemBuffer.size();
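// Fill a frame of at most BUFFER_WRITE_SIZE items, since a single
// BatchPutAttributesRequest only accepts a limited number of items: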
while ( _replaceableItemBuffer.size() > 0 && eReplaceableItemBufferFrame.size() < BUFFER_WRITE_SIZE ) {
eReplaceableItemBufferFrame.add( _replaceableItemBuffer.remove( 0 ) );
}
// -------------------------------------------------------------
// Due to thread race conditions, a call to #destroy() can
// cause the buffer to be empty when reaching the loop above.
// Only in case the frame got some items do we flush to
// Amazon SimpleDB:
// -------------------------------------------------------------
if ( eReplaceableItemBufferFrame.size() > 0 ) {
LOGGER.log( Level.FINE, "Found <" + theBufferSize + "> lines in the buffer, flushing frame of <" + eReplaceableItemBufferFrame.size() + "> lines in this iteration of the buffer for domain \"" + getAmazonSimpleDbDomainName() + "\"." );
eBatchPutAttributesRequest = new BatchPutAttributesRequest( getAmazonSimpleDbDomainName(), eReplaceableItemBufferFrame );
eLastException = null;
boolean isDifferentExceptionCaught = false;
final RetryCounter theRetryCounter = new RetryCounter( IoRetryCount.NORM.getValue(), LatencySleepTime.NORM.getTimeMillis(), LoopExtensionTime.NORM.getTimeMillis() );
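// Retry the batch put with the configured delays between attempts until it
// either succeeds or the retry budget is exhausted: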
while ( theRetryCounter.nextRetry() ) {
try {
getAmazonSimpleDbClient().batchPutAttributes( eBatchPutAttributesRequest );
break;
}
catch ( AmazonClientException e ) {
if ( eLastException != null && !e.getClass().equals( eLastException.getClass() ) ) {
isDifferentExceptionCaught = true;
}
eLastException = e;
String msg = "Failed flushing <" + eReplaceableItemBufferFrame.size() + "> lines of <" + theBufferSize + "> from the log buffer to Amazon SimpleDB. Retrying in <" + ( theRetryCounter.getNextRetryDelayMillis() / 1000 ) + "> seconds ...";
msg += " The error message was: " + toMessage( e );
LOGGER.log( Level.WARNING, msg );
}
if ( !theRetryCounter.hasNextRetry() ) {
String msg = "Failed flushing <" + eReplaceableItemBufferFrame.size() + "> lines of <" + theBufferSize + "> from log buffer to Amazon SimpleDB db. Retried <" + theRetryCounter.getRetryNumber() + "> times.";
msg += " The last error message was: " + toMessage( eLastException );
LOGGER.log( Level.SEVERE, msg, eLastException );
}
}
// ---------------------------------------------------------
// Clear the buffer frame if: i) no exception was caught at
// all, ii) the last exception caught was neither Amazon's
// "RequestTimeoutException" nor "ServiceUnavailable", or
// iii) the last exception caught was such an Amazon
// exception but a different exception was caught before
// (isDifferentExceptionCaught == true):
// ---------------------------------------------------------
if ( eLastException == null || !( isRequestTimeoutException( eLastException ) || isServiceUnavailableException( eLastException ) ) || isDifferentExceptionCaught ) {
eReplaceableItemBufferFrame.clear();
}
}
}
}
}
/**
* This method adds the given item to the buffer and only writes the buffer in
* case it exceeds a given size.
*
* @param aReplaceableItem the replaceable item
*/
private synchronized void writeBuffer( ReplaceableItem aReplaceableItem ) {
// -------------------------------------------
// Is it already full (due to race conditions)?
// -------------------------------------------
// if ( replaceableItemBuffer.size() >= BUFFER_WRITE_SIZE ) {
// flushBuffer();
// }
_replaceableItemBuffer.add( aReplaceableItem );
// -------------------------------------------
// Is it now full?
// -------------------------------------------
if ( _replaceableItemBuffer.size() >= BUFFER_WRITE_SIZE ) {
flushBuffer();
}
}
// /////////////////////////////////////////////////////////////////////////
// INNER CLASSES:
// /////////////////////////////////////////////////////////////////////////
/**
* This daemon is called at regular time intervals to check whether the buffer
* must be flushed.
*/
private class BufferDaemon extends TimerTask {
/**
* {@inheritDoc}
*/
@Override
public void run() {
flushBuffer();
}
}
}