///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2010-2022 60East Technologies Inc., All Rights Reserved.
//
// This computer software is owned by 60East Technologies Inc. and is
// protected by U.S. copyright laws and other laws and by international
// treaties. This computer software is furnished by 60East Technologies
// Inc. pursuant to a written license agreement and may be used, copied,
// transmitted, and stored only in accordance with the terms of such
// license agreement and with the inclusion of the above copyright notice.
// This computer software or any other copies thereof may not be provided
// or otherwise made available to any other person.
//
// U.S. Government Restricted Rights. This computer software: (a) was
// developed at private expense and is in all respects the proprietary
// information of 60East Technologies Inc.; (b) was not developed with
// government funds; (c) is a trade secret of 60East Technologies Inc.
// for all purposes of the Freedom of Information Act; and (d) is a
// commercial item and thus, pursuant to Section 12.212 of the Federal
// Acquisition Regulations (FAR) and DFAR Supplement Section 227.7202,
// Government's use, duplication or disclosure of the computer software
// is subject to the restrictions set forth by 60East Technologies Inc..
//
////////////////////////////////////////////////////////////////////////////
package com.crankuptheamps.client;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.math.BigInteger;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.lang.Long;
import java.util.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import com.crankuptheamps.client.fields.BookmarkField;
import com.crankuptheamps.client.fields.BookmarkRangeField;
import com.crankuptheamps.client.fields.Field;
import com.crankuptheamps.client.exception.*;
/** LoggedBookmarkStore implements a sequentially written log
* of incoming and discarded messages. This store tracks every
* bookmark processed in a file. An application should periodically call
* {@link #prune} to manage the size of the file by removing
* outdated entries.
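*
* <p>A minimal usage sketch (names such as {@code client} and the file path are
* illustrative, exception handling is elided, and registering the store assumes
* the enclosing {@code Client} exposes {@code setBookmarkStore}):</p>
* <pre>{@code
* LoggedBookmarkStore store = new LoggedBookmarkStore("subscriber.bookmarks");
* client.setBookmarkStore(store);
* // ... create bookmark subscriptions and process messages ...
* store.prune(); // call periodically to compact the backing file
* }</pre>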
*/
public class LoggedBookmarkStore implements BookmarkStore
{
// Each entry begins with a single byte indicating the type of entry:
// a new bookmark, or a discard of a previous one.
static final byte ENTRY_BOOKMARK = (byte)'b';
static final byte ENTRY_DISCARD = (byte)'d';
static final byte ENTRY_PERSISTED = (byte)'p';
/**
* The Subscription object is used to represent internal bookmark state
* for the messages received and discarded on a specific subscription
* within the bookmark store.
*/
protected static class Subscription implements com.crankuptheamps.client.Subscription
{
// This subscription's ID.
Field _sub;
// The last persisted bookmark
BookmarkField _lastPersisted;
// If the subscription used a range, save it here
BookmarkRangeField _range = new BookmarkRangeField();
/**
* The last-modified timestamp of the backing bookmark log file, just
* before recovery is initiated. When this is not null we intend to
* include it in the list of bookmarks returned by getMostRecentList()
* until a message is discarded on the sub or the sub state is purged.
*
* Originally we thought the recovery timestamp could be stored
* as the last-persisted for each recovered sub, but it must be
* stored separately here because setting it as a sub's
* last-persisted could cause us to lose a persisted ack that
* is earlier than the backing file's last-modified time.
*/
protected volatile String _recoveryTimestamp = null;
static final Field EPOCH_FIELD = new Field(Client.Bookmarks.EPOCH);
// A set of all of the entries recovered from the bookmark
// log file after the file's most-recent -- i.e., whatever entries
// are left in the bookmark ring buffer after recover().
HashMap<BookmarkField, Long> _recovered = new HashMap<BookmarkField, Long>();
BookmarkRingBuffer _ring = new BookmarkRingBuffer();
// The per-subscription memory of what we've seen from publishers
HashMap<Long, Long> _publishers = new HashMap<Long, Long>();
// The store that we log to when the time comes.
LoggedBookmarkStore _parent;
// The Subscription lock
final Lock _lock = new ReentrantLock();
// The encoder/decoder for the Subscription
final CharsetEncoder _encoder = StandardCharsets.UTF_8.newEncoder();
final CharsetDecoder _decoder = StandardCharsets.UTF_8.newDecoder();
public Subscription()
{
// Default starting point for last persisted is EPOCH
_lastPersisted = new BookmarkField();
_lastPersisted.copyFrom(EPOCH_FIELD);
}
/**
* Reset the state of this subscription object such that it can be
* returned to the pool for reuse.
*/
public void reset() {
_sub.reset();
_lastPersisted.copyFrom(EPOCH_FIELD);
_recoveryTimestamp = null;
_recovered.clear();
_ring.reset();
_publishers.clear();
_range.reset();
}
public void init(Field subscriptionId, LoggedBookmarkStore parent)
{
_sub = subscriptionId.copy();
_ring.setSubId(_sub);
_parent = parent;
}
public String getRecoveryTimestamp()
{
return _recoveryTimestamp;
}
protected final void setRecoveryTimestamp(String rts)
{
_recoveryTimestamp = rts;
}
public long log(BookmarkField bookmark) throws IOException, CommandException
{
_lock.lock();
try {
if (!bookmark.isRange()) {
// Check to see if this is a recovered bookmark.
final Long recoveredValue = _recovered.remove(bookmark);
if (recoveredValue != null)
{
return recoveredValue;
}
// Add this entry onto our list to remember in order.
if (!bookmark.isBookmarkList()) {
return _log(bookmark);
}
else {
// If we're logging a list, we need to mark all items
// in the list as discarded.
long seq = 0;
for (BookmarkField bm : bookmark.parseBookmarkList()) {
isDiscarded(bm);
seq = _log(bm);
if (seq != 0) {
_ring.discard(seq);
_parent._recentChanged = true;
}
}
if (_parent._adapter != null) {
_parent.adapterUpdate(_sub, bookmark);
}
return 0;
}
}
else {
_range.copyFrom(bookmark);
if (!_range.isValid()) {
throw new CommandException("Invalid bookmark range specified");
}
long seq = 0;
if (_range.isStartExclusive()) {
// Parse the start of the range and log/discard each
BookmarkField start = _range.getStart();
if (!start.isBookmarkList()) {
isDiscarded(start);
seq = _log(start);
if (seq != 0) {
_ring.discard(seq);
_parent._recentChanged = true;
}
}
else {
for (BookmarkField bm : start.parseBookmarkList()) {
isDiscarded(bm);
seq = _log(bm);
if (seq != 0) {
_ring.discard(seq);
_parent._recentChanged = true;
}
}
seq = 0;
}
}
if (_parent._adapter != null) {
_parent.adapterUpdate(_sub, _range);
}
return seq;
}
}
finally {
_lock.unlock();
}
}
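// Logs a single non-timestamp bookmark into the ring buffer, growing the
// ring (with the Subscription lock temporarily released) when it is full;
// timestamp bookmarks are recorded as the last-persisted bookmark instead.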
private long _log(BookmarkField bm) throws IOException
{
long seq = 0;
if (!bm.isTimestamp()) {
seq = _ring.log(bm);
while (seq == 0) {
_lock.unlock();
try {
_ring.checkResize();
}
finally {
_lock.lock();
}
seq = _ring.log(bm);
}
}
else {
setLastPersisted(bm);
}
return seq;
}
public void discard(long index) throws IOException
{
_lock.lock();
try {
BookmarkRingBuffer.Entry entry = _ring.getByIndex(index);
if(entry == null || !entry.isActive())
{
return;
}
if (!_parent._recovering) {
_parent.write(_sub, LoggedBookmarkStore.ENTRY_DISCARD,
entry.getBookmark());
}
if (_ring.discard(index))
{
_parent._recentChanged = true;
_recoveryTimestamp = null;
if (_parent._adapter != null) {
_parent.adapterUpdate(_sub,
(BookmarkField)getMostRecentList(false));
}
}
}
finally {
_lock.unlock();
}
}
/**
* Check to see if this message is older than the most recent one seen,
* and if it is, then check to see if it is discarded.
*/
public boolean isDiscarded(BookmarkField bookmark) throws IOException
{
_lock.lock();
try {
if (bookmark.isRange() || bookmark.isBookmarkList())
return false;
Long recoveredIndex = _recovered.get(bookmark);
long publisher = bookmark.getPublisherId();
long sequence = bookmark.getSequenceNumber();
if(!_publishers.containsKey(publisher) ||
unsignedLongLess(_publishers.get(publisher), sequence))
{
_publishers.put(publisher, sequence);
if (recoveredIndex == null) {
return false;
}
}
if (recoveredIndex != null)
{
long recoveredVal = recoveredIndex;
BookmarkRingBuffer.Entry entry = _ring.getByIndex(recoveredVal);
if (entry == null && recoveredVal < _ring.getStartIndex())
{
// If the ring buffer no longer has an entry for this index
// and the index is before the ring's start index, then
// this is a stale recovered map entry whose bookmark has
// already been discarded. Don't let it get logged again.
// Remove the stale mapping and return true (it's discarded).
_recovered.remove(bookmark);
return true;
}
// Need current active state
return (entry == null) ? false : !entry.isActive();
}
return !_parent._recovering;
}
finally {
_lock.unlock();
}
}
public Field getLastPersisted() {
_lock.lock();
try { return _lastPersisted; }
finally {
_lock.unlock();
}
}
public BookmarkRangeField getRange() {
_lock.lock();
try { return _range; }
finally {
_lock.unlock();
}
}
public Field getMostRecent()
{
_lock.lock();
try {
return getMostRecent(false);
}
finally {
_lock.unlock();
}
}
protected Field getMostRecent(boolean updateRecovery_)
{
_lock.lock();
try {
// when this is called, we'll take a moment to update the list of things
// recovered, so we don't accidentally log anything we ought not to.
if (updateRecovery_ && _parent._recentChanged) updateRecovery();
return _ring.getLastDiscarded();
}
finally {
_lock.unlock();
}
}
/** publisherId_ and sequence are signed, but the actual id's are
* unsigned, so we have to do some work to create a valid string.
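* The offset used below is 2^64 (Long.MAX_VALUE shifted left by one, plus
* two), so a negative value such as -1 is rendered as its unsigned
* interpretation, 18446744073709551615.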
*/
private static String convertUnsignedLongToString(long publisherId_)
{
final BigInteger offset = BigInteger.valueOf(Long.MAX_VALUE)
.shiftLeft(1).add(BigInteger.valueOf(2));
if(publisherId_<0)
{
return offset.add(BigInteger.valueOf(publisherId_)).toString();
}
else
{
return Long.toString(publisherId_);
}
}
/** publisherId_ and sequence are signed, but the actual id's are
* unsigned, so we have to do some work to compare them.
*/
private static boolean unsignedLongLess(long seqLeft_, long seqRight_)
{
final BigInteger offset = BigInteger.valueOf(Long.MAX_VALUE)
.shiftLeft(1).add(BigInteger.valueOf(2));
if(seqLeft_<0 || seqRight_<0)
{
BigInteger left = offset.add(BigInteger.valueOf(seqLeft_));
BigInteger right = offset.add(BigInteger.valueOf(seqRight_));
return left.compareTo(right) < 0;
}
else
{
return seqLeft_ < seqRight_;
}
}
private static boolean unsignedLongLessEqual(long seqLeft_, long seqRight_)
{
final BigInteger offset = BigInteger.valueOf(Long.MAX_VALUE)
.shiftLeft(1).add(BigInteger.valueOf(2));
if(seqLeft_<0 || seqRight_<0)
{
BigInteger left = offset.add(BigInteger.valueOf(seqLeft_));
BigInteger right = offset.add(BigInteger.valueOf(seqRight_));
return left.compareTo(right) <= 0;
}
else
{
return seqLeft_ <= seqRight_;
}
}
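// Builds the bookmark (or range) that a re-subscription should use: the
// recovery timestamp (if any), the last discarded bookmark, the last
// persisted bookmark, and, when requested and nothing better is known,
// a list assembled from the per-publisher sequence numbers seen so far.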
public Field getMostRecentList(boolean useList)
{
_lock.lock();
try {
boolean rangeIsValid = _range.isValid();
BookmarkField lastDiscarded = (BookmarkField)_ring.getLastDiscarded();
boolean useLastDiscarded = (lastDiscarded != null &&
!lastDiscarded.isNull());
long lastDiscardedPub = 0;
long lastDiscardedSeq = 0;
boolean useLastPersisted = (_lastPersisted != null &&
_lastPersisted.length > 1);
long lastPersistedPub = 0;
long lastPersistedSeq = 0;
if (useLastPersisted)
{
lastPersistedPub = _lastPersisted.getPublisherId();
lastPersistedSeq = _lastPersisted.getSequenceNumber();
}
if (useLastDiscarded)
{
if (useLastPersisted && _ring.isEmpty()
&& (!rangeIsValid || _range.getEnd().equals(_lastPersisted)))
{
useLastDiscarded = false;
}
else
{
lastDiscardedPub = lastDiscarded.getPublisherId();
lastDiscardedSeq = lastDiscarded.getSequenceNumber();
// Only use one if they are the same publisher
if (useLastPersisted && (lastDiscardedPub == lastPersistedPub))
{
useLastDiscarded = (lastDiscardedSeq < lastPersistedSeq);
useLastPersisted = !useLastDiscarded;
}
}
}
StringBuilder recentStr = new StringBuilder();
BookmarkField recentList = new BookmarkField();
if (_recoveryTimestamp != null)
{
recentStr.append(_recoveryTimestamp);
}
if (useLastDiscarded)
{
if (recentStr.length() > 0) recentStr.append(',');
recentStr.append((lastDiscarded).getValue(_decoder));
}
// If we don't have a last persisted or a last discarded OR we are
// expecting persisted acks but haven't received one yet, then we
// should try to build a list of bookmarks based on publishers we
// have seen so far, if any, or return EPOCH.
if (useList &&
((!useLastPersisted && !useLastDiscarded)
|| (_lastPersisted != null &&
_lastPersisted.equals(EPOCH_FIELD))
))
{
if (_publishers.isEmpty() && !rangeIsValid)
{
// Set last persisted to EPOCH and return it
if (_lastPersisted == null) {
_lastPersisted = new BookmarkField();
_lastPersisted.copyFrom(EPOCH_FIELD);
}
return _lastPersisted;
}
// If an EPOCH lastDiscarded value was added, remove it.
if (useLastDiscarded && lastDiscarded.equals(EPOCH_FIELD))
{
int len = recentStr.length();
if (len == 1) // Only contains EPOCH
recentStr.setLength(0);
else if (len > 2) // Ends with ",0" (,EPOCH)
recentStr.setLength(len - 2);
}
Iterator it = _publishers.entrySet().iterator();
while (it.hasNext())
{
Map.Entry pairs = (Map.Entry)it.next();
long pubId = (Long)pairs.getKey();
if (pubId == 0) continue;
if (useLastDiscarded && pubId == lastDiscardedPub) continue;
long seq = (Long)pairs.getValue();
if (recentStr.length() > 0) recentStr.append(',');
recentStr.append(convertUnsignedLongToString(pubId))
.append(BookmarkField.SEPARATOR_CHAR)
.append(convertUnsignedLongToString(seq))
.append(BookmarkField.SEPARATOR_CHAR);
}
recentList.setValue(recentStr.toString(), _encoder);
if (rangeIsValid) {
if (recentList.length > 1
&& (_range.isStartInclusive()
|| !recentList.equals(_range.getStart()))) {
_range.replaceStart(recentList, true);
}
return _range;
}
return recentList;
}
if (useLastPersisted)
{
if (recentStr.length() > 0) recentStr.append(',');
recentStr.append(_lastPersisted.getValue(_decoder));
}
recentList.setValue(recentStr.toString(), _encoder);
if (rangeIsValid) {
if (recentList.length > 1
&& (_range.isStartInclusive()
|| !recentList.equals(_range.getStart()))) {
_range.replaceStart(recentList, true);
}
return _range;
}
return recentList;
}
finally {
_lock.unlock();
}
}
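// Rebuilds the recovered-bookmark map from the current contents of the
// ring buffer so that a bookmark delivered again (e.g. after failover)
// is matched to its existing ring entry instead of being logged twice.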
private void updateRecovery()
{
_recovered.clear();
long end = _ring.getEndIndex();
for(long index = _ring.getStartIndex(); index< end; ++index)
{
BookmarkRingBuffer.Entry entry = _ring.getByIndex(index);
if(entry != null && entry._bookmark != null && !entry._bookmark.isNull())
{
_recovered.put(entry.getBookmark(), index);
}
}
}
private void getActiveEntries(ArrayList<BookmarkRingBuffer.Entry> entryList_)
{
_ring.getRecoveryEntries(entryList_);
}
/**
* Old style of setting a persisted bookmark; no longer used.
* @deprecated use {@link #setLastPersisted(BookmarkField)} instead.
* @param bookmark Used to be the bookmark sequence number.
*/
@Deprecated
public void setLastPersisted(long bookmark) throws IOException
{
_lock.lock();
try {
BookmarkRingBuffer.Entry entry = _ring.getByIndex(bookmark);
if (entry == null) return;
BookmarkField bookmarkField = entry.getBookmark();
if (bookmarkField == null || bookmarkField.isNull()) return;
long publisherId = bookmarkField.getPublisherId();
boolean lastPersistedNull = (_lastPersisted == null);
if (!lastPersistedNull &&
publisherId == _lastPersisted.getPublisherId() &&
unsignedLongLessEqual(bookmarkField.getSequenceNumber(),
_lastPersisted.getSequenceNumber()))
{
return;
}
if (!lastPersistedNull) _lastPersisted.reset();
_lastPersisted = bookmarkField.copy();
if (!_parent._recovering) {
_parent.write(_sub, ENTRY_PERSISTED, bookmarkField);
}
if (lastPersistedNull
|| publisherId == _ring.getLastDiscarded().getPublisherId())
{
_parent._recentChanged = true;
_recoveryTimestamp = null;
if (!_parent._recovering && _parent._adapter != null) {
_parent.adapterUpdate(_sub,
(BookmarkField)getMostRecentList(false));
}
}
}
finally {
_lock.unlock();
}
}
public void setLastPersisted(BookmarkField bookmark) throws IOException
{
_lock.lock();
try {
if (bookmark == null || bookmark.isNull()
|| bookmark.equals(_lastPersisted)
|| bookmark.isRange())
return;
if (bookmark.isTimestamp())
{
if (_lastPersisted != null) _lastPersisted.reset();
_lastPersisted = bookmark.copy();
_parent.write(_sub, ENTRY_PERSISTED, bookmark);
_parent._recentChanged = true;
if (_parent._adapter != null) {
_parent.adapterUpdate(_sub,
(BookmarkField)getMostRecentList(false));
}
return;
}
long publisherId = bookmark.getPublisherId();
boolean lastPersistedNull = (_lastPersisted == null);
if (!lastPersistedNull &&
publisherId == _lastPersisted.getPublisherId() &&
unsignedLongLessEqual(bookmark.getSequenceNumber(),
_lastPersisted.getSequenceNumber()))
{
return;
}
if (!lastPersistedNull) _lastPersisted.reset();
_lastPersisted = bookmark.copy();
if (!_parent._recovering) {
_parent.write(_sub, ENTRY_PERSISTED, bookmark);
}
if (lastPersistedNull
|| _ring.isEmpty()
|| publisherId == _ring.getLastDiscarded().getPublisherId())
{
_parent._recentChanged = true;
_recoveryTimestamp = null;
if (!_parent._recovering && _parent._adapter != null) {
_parent.adapterUpdate(_sub,
(BookmarkField)getMostRecentList(false));
}
}
}
finally {
_lock.unlock();
}
}
public long getOldestBookmarkSeq()
{
_lock.lock();
try {
return _ring.getStartIndex();
}
finally {
_lock.unlock();
}
}
/**
* Call on a Subscription object just after recovery is performed to
* convert logged entries into recovery entries and set the publishers
* cache state to the earliest sequence seen for each publisher minus
* one.
*
* NOTE: If after recovery lastDiscarded for this sub is null (i.e.
* nothing was discarded) and lastPersisted is null or EPOCH (i.e.
* no persisted ack was recorded in the log), then we can throw away
* this subscription state and return this Subscription object to the
* pool.
*
* @return Indicates whether this subscription's state was reset
* (because it's unneeded due to the lack of any discards and
* persisted acks) and this object should be returned to the
* Subscription pool.
*/
public boolean justRecovered()
{
_lock.lock();
try {
BookmarkField ld = _ring.getLastDiscarded();
if ((ld == null || ld.isNull() || ld.equals(EPOCH_FIELD))
&& (_lastPersisted == null || _lastPersisted.isNull()
|| _lastPersisted.equals(EPOCH_FIELD))
&& !_range.isValid())
{
// Reset this sub for reuse.
reset();
return true;
}
updateRecovery();
ArrayList<BookmarkRingBuffer.Entry> active = new ArrayList<BookmarkRingBuffer.Entry>();
getActiveEntries(active);
setPublishersToDiscarded(active, _publishers);
}
finally {
_lock.unlock();
}
return false;
}
public static void setPublishersToDiscarded(
List<BookmarkRingBuffer.Entry> active,
Map<Long, Long> publishers)
{
if (active == null || publishers == null || publishers.isEmpty()) return;
Iterator<BookmarkRingBuffer.Entry> it = active.iterator();
while (it.hasNext()) {
BookmarkRingBuffer.Entry entry = it.next();
if (!entry.isActive()) continue;
BookmarkField bf = entry.getBookmark();
long seq = bf.getSequenceNumber();
if (seq == 0) continue;
long publisher = bf.getPublisherId();
Long pubSeq = publishers.get(publisher);
if (pubSeq != null && unsignedLongLessEqual(seq, pubSeq)) {
publishers.put(publisher, seq - 1);
}
}
}
public void setResizeHandler(BookmarkStoreResizeHandler handler, BookmarkStore store)
{
_ring.setResizeHandler(handler, store);
}
/**
* Lock self's internal lock.
* Used by LoggedBookmarkStore when gathering Subscriptions to prune.
*/
void lock()
{
_lock.lock();
}
/**
* Unlock self's internal lock.
* Used by LoggedBookmarkStore when finishing a prune() operation.
*/
void unlock()
{
_lock.unlock();
}
}
HashMap<Field, Subscription> _subs = new HashMap<Field, Subscription>();
RandomAccessFile _file;
String _fileName;
volatile boolean _recovering = false;
volatile boolean _recentChanged = true;
final int VERSION = 2;
Pool<Subscription> _pool;
BookmarkStoreResizeHandler _resizeHandler = null;
private int _serverVersion = Client.MIN_MULTI_BOOKMARK_VERSION;
final CharsetEncoder _encoder = StandardCharsets.UTF_8.newEncoder();
final CharsetDecoder _decoder = StandardCharsets.UTF_8.newDecoder();
// The Store lock
final Lock _lock = new ReentrantLock();
final Lock _subsLock = new ReentrantLock();
RecoveryPointAdapter _adapter = null;
RecoveryPointFactory _factory = null;
/**
* Same as calling LoggedBookmarkStore(path, 1, false).
*
* @see #LoggedBookmarkStore(String, int, boolean)
* @param path The path to the backing bookmark log file.
* @throws IOException If there is a problem creating, reading, or writing
* the backing file.
*/
public LoggedBookmarkStore(String path) throws IOException
{
this(path, 1);
}
/**
* Same as calling LoggedBookmarkStore(path, targetNumberOfSubscriptions, false).
*
* @see #LoggedBookmarkStore(String, int, boolean)
* @param path The path to the backing bookmark log file.
* @param targetNumberOfSubscriptions The initial capacity for the number
* of bookmark subscriptions you anticipate creating.
* @throws IOException If there is a problem creating, reading, or writing
* the backing file.
*/
public LoggedBookmarkStore(String path, int targetNumberOfSubscriptions) throws IOException
{
this(path, targetNumberOfSubscriptions, false);
}
/**
* A file-backed bookmark store implementation that fully supports
* discarding messages in an order different from the order they arrived
* (i.e. out-of-order discards) and fail-over to a replicated server.
* All messages must eventually be discarded, otherwise memory usage will
* become proportional to the number of messages received on a
* bookmark subscription since the first undiscarded message.
* This implementation requires that the prune() method be called
* periodically to remove unneeded discarded bookmarks from the
* backing file, otherwise it will continue to grow without bound.
* The prune() method is thread-safe and can be called from any thread.
*
* @param path The path to the backing bookmark log file.
* @param targetNumberOfSubscriptions The initial capacity for the number
* of bookmark subscriptions you anticipate creating on the AMPS
* client instance that this bookmark store is registered on. This
* will grow as needed if more subscriptions are created than
* anticipated.
* @param useLastModifiedTime Indicates whether the recovery timestamp
* feature should be used. If true, the last-modified time of the
* backing file is included (as an AMPS timestamp bookmark in a
* comma-separated list of bookmarks) when getMostRecent() is called
* after recovering from a bookmark file. This feature could be
* useful if you have an infrequently run process that is run on a
* schedule that is longer than the AMPS server keeps messages in
* its transaction log. When this process is started and recovers
* a bookmark log full of old bookmarks that are no longer available
* using the MOST_RECENT bookmark indicator, the recovery timestamp
* will cause the bookmark subscription to begin at the start of the
* transaction log (e.g. EPOCH), rather than its tail (e.g. NOW).
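*
* For example, an infrequently run batch subscriber might pass
* {@code true} here, e.g.
* {@code new LoggedBookmarkStore("batch.bookmarks", 1, true)} (the path is
* illustrative), so that recovery from a stale bookmark log includes the
* file's last-modified time as a timestamp bookmark.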
*
* @throws IOException If there is a problem creating, reading, or writing
* the backing file.
*/
public LoggedBookmarkStore(String path,
int targetNumberOfSubscriptions,
boolean useLastModifiedTime) throws IOException
{
File bmLogFile = new File(path);
String recoveryTimestamp = null;
if (useLastModifiedTime && bmLogFile.exists()) {
Date lastMod = new Date(bmLogFile.lastModified());
TimeZone tz = TimeZone.getTimeZone("UTC");
DateFormat df = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'");
df.setTimeZone(tz);
recoveryTimestamp = df.format(lastMod);
}
_pool = new Pool<Subscription>(Subscription.class, targetNumberOfSubscriptions);
_file = new RandomAccessFile(path, "rw");
_fileName = path;
try
{
recover();
if (recoveryTimestamp != null)
{
// Initialize the recovery timestamp on each recovered sub.
for (Subscription sub: _subs.values())
{
sub.setRecoveryTimestamp(recoveryTimestamp);
}
}
}
catch(IOException ioex)
{
try
{
_file.close();
}
catch (IOException ignoreTheCloseException)
{
}
finally
{
_file = null;
}
throw ioex;
}
}
/**
* A file-backed bookmark store implementation that fully supports
* discarding messages in an order different from the order they arrived
* (i.e. out-of-order discards) and fail-over to a replicated server.
* All messages must eventually be discarded, otherwise memory usage will
* become proportional to the number of messages received on a
* bookmark subscription since the first undiscarded message.
* This implementation requires that the prune() method be called
* periodically to remove unneeded discarded bookmarks from the
* backing file, otherwise it will continue to grow without bound.
* The prune() method is thread-safe and can be called from any thread.
* The store also has a backup RecoveryPointAdapter used in case the file
* is deleted.
*
* @param path The path to the backing bookmark log file.
* @param targetNumberOfSubscriptions The initial capacity for the number
* of bookmark subscriptions you anticipate creating on the AMPS
* client instance that this bookmark store is registered on. This
* will grow as needed if more subscriptions are created than
* anticipated.
* @param useLastModifiedTime Indicates whether the recovery timestamp
* feature should be used. If true, the last-modified time of the
* backing file is included (as an AMPS timestamp bookmark in a
* comma-separated list of bookmarks) when getMostRecent() is called
* after recovering from a bookmark file. This feature could be
* useful if you have an infrequently run process that is run on a
* schedule that is longer than the AMPS server keeps messages in
* its transaction log. When this process is started and recovers
* a bookmark log full of old bookmarks that are no longer available
* using the MOST_RECENT bookmark indicator, the recovery timestamp
* will cause the bookmark subscription to begin at the start of the
* transaction log (e.g. EPOCH), rather than its tail (e.g. NOW).
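*
* A minimal construction sketch (assuming {@code myAdapter} is an
* already-configured {@link RecoveryPointAdapter} implementation and
* exception handling is elided):
* <pre>{@code
* LoggedBookmarkStore store =
*     new LoggedBookmarkStore("subscriber.bookmarks", 4, false, myAdapter);
* }</pre>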
* @param adapter The RecoveryPointAdapter backing up the store. The
* adapter will be sent FixedRecoveryPoints.
*
* @throws IOException If there is a problem creating, reading, or writing
* the backing file.
*/
public LoggedBookmarkStore(String path,
int targetNumberOfSubscriptions,
boolean useLastModifiedTime,
RecoveryPointAdapter adapter) throws IOException
{
this(path, targetNumberOfSubscriptions, useLastModifiedTime, adapter, new FixedRecoveryPointFactory());
}
/**
* A file-backed bookmark store implementation that fully supports
* discarding messages in an order different from the order they arrived
* (i.e. out-of-order discards) and fail-over to a replicated server.
* All messages must eventually be discarded, otherwise memory usage will
* become proportional to the number of messages received on a
* bookmark subscription since the first undiscarded message.
* This implementation requires that the prune() method be called
* periodically to remove unneeded discarded bookmarks from the
* backing file, otherwise it will continue to grow without bound.
* The prune() method is thread-safe and can be called from any thread.
* The store also has a backup RecoveryPointAdapter used in case the file
* is deleted.
*
* @param path The path to the backing bookmark log file.
* @param targetNumberOfSubscriptions The initial capacity for the number
* of bookmark subscriptions you anticipate creating on the AMPS
* client instance that this bookmark store is registered on. This
* will grow as needed if more subscriptions are created than
* anticipated.
* @param useLastModifiedTime Indicates whether the recovery timestamp
* feature should be used. If true, the last-modified time of the
* backing file is included (as an AMPS timestamp bookmark in a
* comma-separated list of bookmarks) when getMostRecent() is called
* after recovering from a bookmark file. This feature could be
* useful if you have an infrequently run process that is run on a
* schedule that is longer than the AMPS server keeps messages in
* its transaction log. When this process is started and recovers
* a bookmark log full of old bookmarks that are no longer available
* using the MOST_RECENT bookmark indicator, the recovery timestamp
* will cause the bookmark subscription to begin at the start of the
* transaction log (e.g. EPOCH), rather than its tail (e.g. NOW).
* @param adapter The RecoveryPointAdapter backing up the store.
* @param factory The RecoveryPointFactory used to create RecoveryPoints
* that are sent to the adapter.
*
* @throws IOException If there is a problem creating, reading, or writing
* the backing file.
*/
public LoggedBookmarkStore(String path,
int targetNumberOfSubscriptions,
boolean useLastModifiedTime,
RecoveryPointAdapter adapter,
RecoveryPointFactory factory) throws IOException
{
String recoveryTimestamp = null;
File bmLogFile = new File(path);
if (useLastModifiedTime && bmLogFile.exists()) {
Date lastMod = new Date(bmLogFile.lastModified());
TimeZone tz = TimeZone.getTimeZone("UTC");
DateFormat df = new SimpleDateFormat("yyyyMMdd'T'HHmmss'Z'");
df.setTimeZone(tz);
recoveryTimestamp = df.format(lastMod);
}
_pool = new Pool<Subscription>(Subscription.class, targetNumberOfSubscriptions);
_file = new RandomAccessFile(path, "rw");
_fileName = path;
try
{
_recovering = true;
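// Replay the adapter's recovery points before reading the backing file:
// each bookmark in a recovery point is logged for its subscription and
// immediately discarded (ranges are only logged), while timestamp entries
// become that subscription's recovery timestamp.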
Message m = new JSONMessage(_encoder, _decoder);
for (RecoveryPoint rp : adapter) {
if (rp == null) break;
Field subId = rp.getSubId();
m.reset();
m.setSubId(subId.buffer, subId.position, subId.length);
BookmarkField bookmark = rp.getBookmark();
if (bookmark.isRange()) {
m.setBookmark(bookmark.buffer, bookmark.position,
bookmark.length);
try {
log(m);
} catch(AMPSException e) { } // Always a valid range
}
else {
try {
for (BookmarkField bm : bookmark.parseBookmarkList())
{
if (!bm.isTimestamp())
{
m.setBookmark(bm.buffer, bm.position,
bm.length);
isDiscarded(m);
if (log(m) > 0)
discard(m);
}
else
{
find(subId).setRecoveryTimestamp(bm.toString());
}
}
} catch(AMPSException e) { } // Always valid
}
}
}
finally {
_recovering = false;
}
try
{
recover();
if (recoveryTimestamp != null)
{
// Initialize the recovery timestamp on each recovered sub.
for (Subscription sub: _subs.values()) {
sub.setRecoveryTimestamp(recoveryTimestamp);
}
}
}
catch(IOException ioex) {
try
{
_file.close();
}
catch (IOException ignoreTheCloseException) {
}
finally {
_file = null;
}
throw ioex;
}
// Set after recovery complete to avoid sending any updates
// during recovery
_adapter = adapter;
_factory = factory;
}
/**
* Remove outdated entries in the bookmark store.
* This function creates a temporary file, copies current entries to
* that file, and then replaces the current file with the temporary
* file.
* @throws IOException Thrown when an operation on the file fails.
* @throws StoreException Thrown when any other operation fails with details on the failure.
*/
public void prune() throws IOException, StoreException
{
String name = _fileName + ".tmp";
prune(name);
}
/**
* Remove outdated entries in the bookmark store.
* This function creates a temporary file, copies current entries to
* that file, and then replaces the current file with the temporary
* file.
* @param tmpFileName_ The name of the temporary file.
* @throws IOException Thrown when an operation on the file fails.
* @throws StoreException Thrown when any other operation fails with details on the failure.
*/
public void prune(String tmpFileName_) throws IOException, StoreException
{
_subsLock.lock();
try
{
List<Map.Entry<Field, Subscription>> subs = _lockSubs();
try
{
prune(tmpFileName_,subs);
}
finally
{
_unlockSubs(subs);
}
}
finally
{
_subsLock.unlock();
}
}
/**
* Remove outdated entries in the bookmark store.
* This function is used internally and processes a list of subscriptions
* whose locks are already held, so that the locking order of subscription
* then store can be preserved.
* @param tmpFileName_ The name of the temporary file.
* @param subs_ The list of Subscriptions to prune.
*/
private void prune(String tmpFileName_,
List<Map.Entry<Field, Subscription>> subs_)
throws IOException, StoreException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
if (!_recentChanged) {
return;
}
RandomAccessFile file = new RandomAccessFile(tmpFileName_, "rw");
try
{
file.getChannel().force(false);
file.writeInt(VERSION);
file.writeByte((byte)'\n');
StringBuilder bookmarkBuilder = new StringBuilder(64);
Field bookmark = new Field();
for (Map.Entry<Field, Subscription> sub : subs_)
{
Field subId = sub.getKey();
assert(subId != null);
Subscription subscription = sub.getValue();
BookmarkField recent = (BookmarkField)subscription.getMostRecent().copy();
if (recent.isNull()) {
recent.copyFrom(subscription.EPOCH_FIELD);
}
long recentPub = recent.getPublisherId();
// Check for the Subscription's range
if (subscription.getRange().isValid())
{
// Update and return the range
Field range = subscription.getMostRecentList(true);
// A range doesn't need to be discarded, so we only need to log it
writeBookmarkToFile(file, subId, range, ENTRY_BOOKMARK);
// Ignore recent after this because it's a range
recentPub = 0;
recent.reset();
}
HashMap<Long, Long> publishers = new HashMap<Long, Long>(subscription._publishers);
ArrayList<BookmarkRingBuffer.Entry> recovered = new ArrayList<BookmarkRingBuffer.Entry>();
subscription.getActiveEntries(recovered);
Subscription.setPublishersToDiscarded(recovered, publishers);
// First write the highest discard for each publisher other
// than most recent.
Iterator<Map.Entry<Long, Long>> pubIter = publishers.entrySet().iterator();
while (pubIter.hasNext()) {
Map.Entry<Long, Long> publisher = pubIter.next();
long pubId = publisher.getKey();
long pubSeq = publisher.getValue();
if (pubId == 0 || pubSeq == 0 || pubId == recentPub)
continue;
bookmarkBuilder.setLength(0);
bookmarkBuilder.append(Subscription.convertUnsignedLongToString(pubId))
.append(BookmarkField.SEPARATOR_CHAR)
.append(Subscription.convertUnsignedLongToString(pubSeq))
.append(BookmarkField.SEPARATOR_CHAR);
bookmark.set(bookmarkBuilder.toString().getBytes(StandardCharsets.UTF_8), 0, bookmarkBuilder.length());
writeBookmarkToFile(file, subId, bookmark, ENTRY_BOOKMARK);
writeBookmarkToFile(file, subId, bookmark, ENTRY_DISCARD);
}
// Now write the most recent
if (recent.length > 1) {
writeBookmarkToFile(file, subId, recent, ENTRY_BOOKMARK);
writeBookmarkToFile(file, subId, recent, ENTRY_DISCARD);
}
// Now write the last persisted
if (subscription._lastPersisted != null
&& subscription._lastPersisted.length > 1)
{
writeBookmarkToFile(file, subId,
subscription._lastPersisted,
ENTRY_PERSISTED);
}
for (BookmarkRingBuffer.Entry entry : recovered)
{
BookmarkField entryBookmark = entry.getBookmark();
if (entryBookmark == null
|| entryBookmark.isNull())
{
continue;
}
writeBookmarkToFile(file, subId, entryBookmark,
ENTRY_BOOKMARK);
if (!entry.isActive())
writeBookmarkToFile(file, subId,
entryBookmark,
ENTRY_DISCARD);
}
}
}
// If we end up in catch because of an IOException, need to clean tmp file
catch (IOException e)
{
file.close();
File tmp = new File(tmpFileName_);
// We can ignore a failure of delete here, prune has failed
tmp.delete();
throw new StoreException("Failed attempting to prune file " + _fileName + " to " + tmpFileName_, e);
}
// Close the files, delete the original, move the pruned file
file.close();
_file.close();
_file = null;
int retries = 0;
File origTmp = new File(_fileName);
while (retries++ < 3)
{
if (!origTmp.delete())
{
if (retries >= 3) throw new StoreException(
"Failed to delete original file " + _fileName
+ " after completing prune to " + tmpFileName_);
}
else
{
break;
}
}
// Let file system catch up
while (origTmp.exists())
{
try {
Thread.sleep(100L);
} catch (InterruptedException e) {
// Ignore
}
}
retries = 0;
while (retries++ < 3)
{
File tmp = new File(tmpFileName_);
if (!tmp.renameTo(origTmp))
{
if (retries >= 3) throw new StoreException(
"Failed to rename pruned file " + tmpFileName_
+ " to original file name: " + _fileName);
try { Thread.sleep(50); }
catch (InterruptedException ex) { }
}
else
{
break;
}
}
_file = new RandomAccessFile(_fileName, "rw");
if (_file.length() > 0) _file.seek(_file.length());
_recentChanged = false;
}
finally {
_lock.unlock();
}
}
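// On-disk record layout (current VERSION): [int subId length][subId bytes]
// [one entry-type byte: 'b', 'd', or 'p'][int bookmark length]
// [bookmark bytes]['\n']. Older file versions used single length bytes
// instead of ints; recover() handles both.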
private void writeBookmarkToFile(RandomAccessFile file_, Field subId_, Field bookmark_, byte entry_) throws IOException
{
file_.writeInt(subId_.length);
file_.write(subId_.buffer, subId_.position, subId_.length);
file_.writeByte(entry_);
file_.writeInt(bookmark_.length);
file_.write(bookmark_.buffer, bookmark_.position, bookmark_.length);
file_.writeByte((byte)'\n');
}
void write(Field sub, byte entry, Field data) throws IOException
{
_lock.lock();
try {
if(!_recovering)
{
writeBookmarkToFile(_file, sub, data, entry);
}
}
finally {
_lock.unlock();
}
}
void write(Field sub, byte entry, long data) throws IOException
{
_lock.lock();
try {
if(!_recovering)
{
_file.writeInt(sub.length);
_file.write(sub.buffer, sub.position, sub.length);
_file.writeByte(entry);
_file.writeLong(data);
_file.writeByte((byte)'\n');
}
}
finally {
_lock.unlock();
}
}
/**
* This method is used internally by this {@link BookmarkStore}
* implementation to recover its state from the bookmark store file.
* This allows it to know on restart what messages have been discarded by
* a subscriber and what the most recent message a subscriber should receive
* from a publisher for a bookmark subscription.
*
* IMPORTANT NOTE: When making changes to this method, please run the
* manual test case in
* gfs/sixty/amps-client-java/wip/manual_tests/LargeUnprunedBookmarkStore
* to ensure proper function in certain hard-to-automate cases, such
* as ensuring a reasonable memory footprint.
*
* @throws IOException If an unrecoverable problem is detected while
* processing the bookmark log file.
*/
private final void recover() throws IOException
{
_recovering = true;
if (_file.length() < 4) {
try {
_file.writeInt(VERSION);
_file.writeByte((byte)'\n');
}
finally {
_recovering = false;
}
return;
}
int version = 0;
try {
version = _file.readInt();
}
catch (IOException ex) {
_recovering = false;
throw ex;
}
boolean readInts = version == VERSION;
if (readInts) {
try {
_file.readByte();
}
catch (IOException ex) {
_recovering = false;
throw ex;
}
if (_file.length() == 5) {
_recovering = false;
return;
}
}
else {
_file.seek(0);
}
HashMap<Field, HashMap<String, Long>> ids = new HashMap<Field, HashMap<String, Long>>();
int maxSubLen = 255;
byte[] sub = new byte[maxSubLen];
int maxBookmarkLen = 255;
byte[] bookmark = new byte[maxBookmarkLen];
long lastGoodPosition = _file.getFilePointer();
int subLen = 0;
int bmLen = 0;
Field subId = new Field();
BookmarkField bookmarkField = new BookmarkField();
Long zero = Long.valueOf(0);
// Create temporary buffered stream for bookmark file. We use a new file
// channel so that closing the stream later won't close _file.
Path path = FileSystems.getDefault().getPath(_fileName);
FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
channel.position(lastGoodPosition);
DataInputStream in = null;
try
{
in = new DataInputStream(
new BufferedInputStream(Channels.newInputStream(channel),
8192));
long position = lastGoodPosition;
long line = 0;
while(lastGoodPosition < _file.length())
{
long entryStartPos = position;
Field subIdCopy = null;
if (readInts) {
subLen = in.readInt();
position+=4;
}
else {
subLen = in.readUnsignedByte();
position++;
}
if (position < 0 || position + subLen > _file.length())
{
// truncated entry
_file.seek(entryStartPos);
throw new IOException("Invalid subid length " + subLen + " starting at position " + entryStartPos + " line " + line + " in file " + _fileName + " of size " + _file.length());
}
if (subLen > maxSubLen) {
do {
maxSubLen *= 2;
} while (subLen > maxSubLen);
sub = new byte[maxSubLen];
}
in.readFully(sub, 0, subLen);
subId.set(sub, 0, subLen);
position += subLen;
HashMap<String, Long> subscriptionMap = ids.get(subId);
if(subscriptionMap == null)
{
subscriptionMap = new HashMap<String, Long>();
subIdCopy = subId.copy();
ids.put(subIdCopy, subscriptionMap);
}
Subscription subscription = find(subId);
int entryType = in.readUnsignedByte();
position++;
if (readInts) {
bmLen = in.readInt();
position+=4;
}
else {
bmLen = in.readUnsignedByte();
position++;
}
if(bmLen < 0 || position + bmLen > _file.length())
{
// truncated entry
_file.seek(entryStartPos);
throw new IOException("Invalid bookmark len " + bmLen + " starting at position " + entryStartPos + " line " + line + " in file " + _fileName + " of size " + _file.length());
}
if (bmLen > maxBookmarkLen) {
do {
maxBookmarkLen *= 2;
} while (bmLen > maxBookmarkLen);
bookmark = new byte[maxBookmarkLen];
}
in.readFully(bookmark, 0, bmLen);
position += bmLen;
if (in.readUnsignedByte() != (byte)'\n') {
// bad read
_file.seek(entryStartPos);
throw new IOException("Invalid record didn't end with newline starting at position " + entryStartPos + " line " + line + " in file " + _fileName + " of size " + _file.length());
}
position++;
bookmarkField.set(bookmark, 0, bmLen);
switch(entryType)
{
case ENTRY_BOOKMARK:
if (bookmarkField.isRange()) {
try {
// log does all we need
subscription.log(bookmarkField);
} catch (CommandException e) { } // range always valid
}
else {
String bmStr = bookmarkField.getValue(_decoder);
if(subscriptionMap.get(bmStr) != null)
{
subscription.getMostRecent(true);
subscriptionMap.clear();
}
if (!subscription.isDiscarded(bookmarkField))
{
try {
long addedIdx = subscription.log(bookmarkField);
subscriptionMap.put(bmStr, addedIdx);
} catch (CommandException e) { } // no range
}
else
{
subscriptionMap.put(bmStr, zero);
}
}
break;
case ENTRY_DISCARD:
String bkmStr = bookmarkField.getValue(_decoder);
Long subscriptionMapEntry = subscriptionMap.get(bkmStr);
if(subscriptionMapEntry != null)
{
subscriptionMap.remove(bkmStr);
if (subscriptionMapEntry > 0)
{
subscription.discard(subscriptionMapEntry);
}
}
break;
case ENTRY_PERSISTED:
subscription.setLastPersisted(bookmarkField);
break;
default:
throw new IOException("Corrupt file found.");
}
lastGoodPosition = position;
++line;
}
if (_file.getFilePointer() != lastGoodPosition) {
_file.seek(lastGoodPosition);
}
in.close();
in = null;
}
catch (IOException ex)
{
if (in == null) { //Failed to create DataInputStream
_recovering = false;
throw ex;
}
boolean onLastTruncatedLine = true;
try {
// Try to determine if we are on the last line of the file,
// which may have been corrupted due to truncation from an
// abrupt exit.
int len = 0;
byte[] buffer = new byte[255];
while ((len = in.read(buffer, 0, buffer.length)) != -1) {
// Scan for a newline char from the current position.
for (int i = 0; i < len; i++) {
// If we found a newline, then we're not on the
// last line of the file that has been truncated.
if ((byte)'\n' == buffer[i]) {
onLastTruncatedLine = false;
break;
}
}
}
// Now close the stream.
in.close();
in = null;
}
catch (IOException ex2) {
ex.addSuppressed(ex2);
_recovering = false;
throw ex; // Something seems to be seriously wrong, so throw.
}
if (lastGoodPosition > 0 && onLastTruncatedLine)
{
// We only want to seek to the last good position if
// we know we're on the last line of the file and it
// has been truncated (due to an abrupt shutdown).
try {
_file.seek(lastGoodPosition);
}
catch (IOException ex2) {
ex.addSuppressed(ex2);
_recovering = false;
throw ex;
}
}
else
{
// The corruption seems to be somewhere earlier in the
// file, so throw the exception.
_recovering = false;
throw ex;
}
}
finally
{
try {
if (in != null) in.close();
}
catch (IOException ex) {
}
// Because this is only called either in constructor or in
// setServerVersion already under the _subsLock, there is
// no reason to lock _subsLock here.
Iterator<Map.Entry<Field, Subscription>> it = _subs.entrySet().iterator();
while (it.hasNext())
{
Map.Entry<Field, Subscription> pairs = it.next();
boolean disposeSub = pairs.getValue().justRecovered();
if (disposeSub)
{
it.remove();
_pool.returnToPool(pairs.getValue());
}
}
_recovering = false;
}
// If this was an older file version, rewrite it
if (!readInts) {
try {
prune();
}
catch (StoreException ex) {
throw new IOException("Failed to rewrite older file version, see inner exception", ex);
}
}
}
public long log(Message message) throws AMPSException
{
BookmarkField bookmark = (BookmarkField)message.getBookmarkRaw();
if (bookmark.equals(Subscription.EPOCH_FIELD)) return 0;
Subscription sub = (LoggedBookmarkStore.Subscription)message.getSubscription();
Field subId = message.getSubIdRaw();
if (subId == null || subId.isNull())
subId = message.getSubIdsRaw();
_lock.lock();
try
{
if(!_recovering && _file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
long index = 0;
try
{
if (sub == null)
{
sub = find(subId);
message.setSubscription(sub);
}
index = sub.log(bookmark);
}
catch (IOException ioex)
{
throw new AMPSException("Error logging to bookmark store", ioex);
}
message.setBookmarkSeqNo(index);
_lock.lock();
try
{
// log the arrival of this bookmark.
write(subId, LoggedBookmarkStore.ENTRY_BOOKMARK, bookmark);
}
catch (IOException ioex)
{
throw new AMPSException("Error logging to bookmark store", ioex);
}
finally
{
_lock.unlock();
}
return index;
}
public void discard(Field subId, long bookmarkSeqNo) throws AMPSException
{
_lock.lock();
try
{
if(!_recovering && _file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
try
{
find(subId).discard(bookmarkSeqNo);
}
catch (IOException ioex)
{
throw new AMPSException("Error discarding from bookmark store", ioex);
}
}
public void discard(Message message) throws AMPSException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
BookmarkField bookmarkField = message.getBookmarkRaw();
if (bookmarkField.equals(Subscription.EPOCH_FIELD)
|| bookmarkField.isTimestamp() || bookmarkField.isRange()
|| bookmarkField.isBookmarkList())
{
return;
}
long bookmark = message.getBookmarkSeqNo();
Subscription sub = (LoggedBookmarkStore.Subscription)message.getSubscription();
if (sub == null)
{
Field subId = message.getSubIdRaw();
if (subId == null || subId.isNull())
subId = message.getSubIdsRaw();
sub = find(subId);
message.setSubscription(sub);
}
try
{
sub.discard(bookmark);
}
catch (IOException ioex)
{
throw new AMPSException("Error discarding from bookmark store", ioex);
}
}
public Field getMostRecent(Field subId) throws AMPSException
{
return getMostRecent(subId, true);
}
public Field getMostRecent(Field subId, boolean useList) throws AMPSException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
return find(subId).getMostRecentList(useList).copy();
}
public boolean isDiscarded(Message message) throws AMPSException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
BookmarkField bookmark = (BookmarkField)message.getBookmarkRaw();
if (bookmark.equals(Subscription.EPOCH_FIELD)) return true;
if (bookmark.isTimestamp() || bookmark.isBookmarkList()) return false;
Subscription sub = (LoggedBookmarkStore.Subscription)message.getSubscription();
if (sub == null)
{
Field subId = message.getSubIdRaw();
if (subId == null || subId.isNull())
subId = message.getSubIdsRaw();
sub = find(subId);
message.setSubscription(sub);
}
try
{
return sub.isDiscarded(bookmark);
}
catch (IOException ioex)
{
throw new AMPSException("Error checking is discarded in bookmark store", ioex);
}
}
/**
* Old style of setting a persisted bookmark; no longer used.
* @deprecated use {@link #persisted(Field, BookmarkField)} instead.
*/
@Deprecated
public void persisted(Field subId, long bookmark) throws AMPSException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
try
{
find(subId).setLastPersisted(bookmark);
}
catch (IOException ioex)
{
throw new AMPSException("Error logging persisted to bookmark store", ioex);
}
}
public void persisted(Field subId, BookmarkField bookmark) throws AMPSException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
if (bookmark.equals(Subscription.EPOCH_FIELD) || bookmark.isRange())
{
return;
}
try
{
find(subId).setLastPersisted(bookmark);
}
catch (IOException ioex)
{
throw new AMPSException("Error logging persisted to bookmark store", ioex);
}
}
public long getOldestBookmarkSeq(Field subId) throws AMPSException
{
_lock.lock();
try
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
}
finally
{
_lock.unlock();
}
return find(subId).getOldestBookmarkSeq();
}
public void setResizeHandler(BookmarkStoreResizeHandler handler)
{
// The _resizeHandler is only touched under _subsLock
_subsLock.lock();
try {
_resizeHandler = handler;
Iterator it = _subs.entrySet().iterator();
while (it.hasNext())
{
Map.Entry pairs = (Map.Entry)it.next();
((Subscription)pairs.getValue()).setResizeHandler(handler, this);
}
}
finally {
_subsLock.unlock();
}
}
/**
* Finds and returns the Subscription object for the specified
* subscription id (subId).
* @param subId The subId to find or create a Subscription for.
* @return The Subscription associated with the subId.
*/
protected Subscription find(Field subId)
{
_subsLock.lock();
try {
Subscription s = _subs.get(subId);
if(s==null)
{
s=_pool.get();
s.init(subId, this);
s.setResizeHandler(_resizeHandler, this);
_subs.put(subId.copy(), s);
}
return s;
}
finally {
_subsLock.unlock();
}
}
/**
* Remove all entries in the bookmark store, completely
* clearing all record of messages received and discarded.
*/
public void purge() throws AMPSException
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
_purge();
if (_adapter != null) {
try {
_adapter.purge();
}
catch (AMPSException e) {
throw e;
}
catch (Exception e) {
throw new StoreException("Exception in RecoveryPointAdapter.purge()", e);
}
}
}
public void _purge() throws AMPSException
{
_subsLock.lock();
_lock.lock();
try {
_recentChanged = true;
// delete the file on disk.
try
{
_file.setLength(0);
_file.writeInt(VERSION);
_file.writeByte((byte)'\n');
}
catch(IOException ioex)
{
throw new StoreException("Error truncating file", ioex);
}
for (Subscription sub: _subs.values())
{
sub.reset();
_pool.returnToPool(sub);
}
_subs.clear();
}
finally {
_lock.unlock();
_subsLock.unlock();
}
}
/**
* Remove all entries in the bookmark store for a subId.
* @param subId_ The subId to remove from the store.
*/
public void purge(Field subId_) throws AMPSException
{
if(_file == null)
{
throw new StoreException("Store not open.");
}
_purge(subId_);
if (_adapter != null) {
try {
_adapter.purge(subId_);
}
catch (AMPSException e) {
throw e;
}
catch (Exception e) {
throw new StoreException("Exception in RecoveryPointAdapter.purge(" + subId_.toString() + ")", e);
}
}
}
public void _purge(Field subId_) throws AMPSException
{
_subsLock.lock();
String name = "";
try {
// Need to acquire all the Subscription locks here before the
// store lock, because prune(name) will acquire all of them below.
// If we didn't acquire these and some other Subscription is active
// (not the one we're purging) we could get a deadlock if that
// active sub is executing a discard() or persisted() call.
List> subs = _lockSubs();
try {
_lock.lock();
try {
Subscription sub = _subs.remove(subId_);
if (sub == null) return;
sub.reset();
_pool.returnToPool(sub);
name = _fileName + ".tmp";
prune(name);
}
finally {
_lock.unlock();
}
}
finally {
_unlockSubs(subs);
}
}
catch (IOException e) {
throw new StoreException("Underlying IOException while pruning. "
+ "temp bookmark log file = " + name, e);
}
finally {
_subsLock.unlock();
}
}
/**
* Change the RecoveryPointFactory used by this store for its adapter.
* @param factory_ The new RecoveryPointFactory
* @throws AMPSException If one of factory or adapter is null.
*/
public void setRecoveryPointFactory(RecoveryPointFactory factory_) throws AMPSException
{
if (factory_ == null || _adapter == null) {
throw new CommandException("Factory and Adapter must not be null.");
}
_factory = factory_;
}
/**
* Closes the bookmark store. If it is already closed, a StoreException is thrown.
* @throws StoreException if there is an error closing the file backing the store,
* or if the store is already closed.
*/
public void close() throws AMPSException
{
_lock.lock();
try {
StoreException ex = null;
if (_adapter != null) {
try
{
_adapter.close();
}
catch(Exception e)
{
ex = new StoreException("Error closing adapter", e);
}
}
if(_file != null) {
try
{
_file.close();
}
catch(IOException ioex)
{
if (ex == null) {
throw new StoreException("Error closing file", ioex);
}
else {
throw new StoreException("Error closing adapter and file " + ioex.toString(), ex);
}
}
}
else {
throw new StoreException("Store not open.");
}
if (ex != null) throw ex;
}
finally {
_adapter = null;
_factory = null;
_file = null;
_lock.unlock();
}
}
/**
* Used to change the version of the AMPS server that this bookmark store's
* client has connected to.
*
* @param version An AMPS server version integer of the form 03080000 for
* version 3.8.0.0.
*/
public void setServerVersion(int version)
{
_serverVersion = version;
}
/**
* Called by Client when connected to an AMPS server in order to retrieve
* the version number of the server. Returns retrieved version number.
* @return the server version, represented as an integer
*/
public int getServerVersion()
{
return _serverVersion;
}
/**
* Used internally to update the RecoveryPointAdapter if there is one.
* @param subId The subId to update.
* @param bookmark The latest bookmark.
* @throws IOException If there is an exception from the adapter.
*/
protected void adapterUpdate(Field subId, BookmarkField bookmark) throws IOException {
if (_adapter != null) {
try {
_adapter.update(_factory.createRecoveryPoint(subId, bookmark));
}
catch (Exception e) {
throw new IOException("Exception in LoggedBookmarkStore updating the RecoveryPointAdapter", e);
}
}
}
/**
* Locks and returns the current list of Subscriptions.
* Used internally by prune() to gather and lock the list of subscriptions
* that we need to prune. We hold self's lock here to get the subscription
* list and then release it to lock up all of the subscriptions.
*/
private List<Map.Entry<Field, Subscription>> _lockSubs()
{
// The _subsLock MUST already be held
// No need to hold self's _lock here.
List<Map.Entry<Field, Subscription>> subs
= new Vector<Map.Entry<Field, Subscription>>(_subs.size());
for(Map.Entry<Field, Subscription> entry : _subs.entrySet())
{
subs.add(entry);
}
// Lock the subscriptions without self's lock held.
for(Map.Entry<Field, Subscription> entry : subs)
{
entry.getValue().lock();
}
return subs;
}
/**
* Unlocks the provided list of Subscriptions.
* Used internally by prune() after pruning completes to release the
* Subscription locks that were acquired by _lockSubs().
*/
private void _unlockSubs(List<Map.Entry<Field, Subscription>> subs_)
{
// The _subsLock MUST already be held
// No need to hold self's _lock here.
// We are just unlocking the individual Subscriptions that were
// held for the duration of prune().
for(Map.Entry<Field, Subscription> entry : subs_)
{
entry.getValue().unlock();
}
}
}