com.nimbusds.infinispan.persistence.dynamodb.DynamoDBStore
Infinispan module for persisting data to an AWS DynamoDB table
package com.nimbusds.infinispan.persistence.dynamodb;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.document.DynamoDB;
import com.amazonaws.services.dynamodbv2.document.Index;
import com.amazonaws.services.dynamodbv2.document.Item;
import com.amazonaws.services.dynamodbv2.document.Table;
import com.amazonaws.services.dynamodbv2.document.spec.GetItemSpec;
import com.amazonaws.services.dynamodbv2.document.spec.UpdateTableSpec;
import com.amazonaws.services.dynamodbv2.model.*;
import com.codahale.metrics.Timer;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.InternalMetadataBuilder;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.dynamodb.config.DynamoDBStoreConfiguration;
import com.nimbusds.infinispan.persistence.dynamodb.logging.Loggers;
import com.nimbusds.infinispan.persistence.dynamodb.query.DynamoDBQueryExecutor;
import com.nimbusds.infinispan.persistence.dynamodb.query.DynamoDBQueryExecutorInitContext;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.persistence.Store;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.PersistenceException;
import org.kohsuke.MetaInfServices;
import org.reactivestreams.Publisher;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.time.Instant;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
/**
* AWS DynamoDB store for Infinispan caches and maps.
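*
* <p>A minimal wiring sketch (illustrative, not part of this class): the store is
* typically registered through the Infinispan persistence API. The
* {@code DynamoDBStoreConfigurationBuilder} name below is assumed by convention and
* should be replaced with the actual builder that accompanies
* {@link com.nimbusds.infinispan.persistence.dynamodb.config.DynamoDBStoreConfiguration}.
*
* <pre>
* org.infinispan.configuration.cache.ConfigurationBuilder b =
*     new org.infinispan.configuration.cache.ConfigurationBuilder();
* b.persistence()
*  .addStore(DynamoDBStoreConfigurationBuilder.class) // assumed builder class
*  .shared(true);
* org.infinispan.configuration.cache.Configuration cacheConfig = b.build();
* </pre>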
*/
@ThreadSafe
@MetaInfServices
@ConfiguredBy(DynamoDBStoreConfiguration.class)
@Store(shared = true)
public class DynamoDBStore<K, V> extends InfinispanStore<K, V> {
/**
* The DynamoDB configuration.
*/
private DynamoDBStoreConfiguration config;
/**
* The DynamoDB HTTP client.
*/
private AmazonDynamoDB client;
/**
* The DynamoDB table API client.
*/
private Table table;
/**
* The DynamoDB item transformer (to / from Infinispan entries).
*/
private DynamoDBItemTransformer<K, V> itemTransformer;
/**
* The optional DynamoDB query executor.
*/
private DynamoDBQueryExecutor<K, V> queryExecutor;
/**
* The DynamoDB request factory.
*/
private RequestFactory<K, V> requestFactory;
/**
* Optional item HMAC SHA-256 security.
*/
private ItemHMAC itemHMAC;
/**
* The marshalled Infinispan entry factory.
*/
private MarshallableEntryFactory<K, V> marshalledEntryFactory;
/**
* Purges expired entries found in the DynamoDB store, as indicated by
* their persisted metadata (optional, may be ignored / not stored).
*/
private ExpiredEntryReaper<K, V> reaper;
/**
* DynamoDB meters.
*/
private DynamoDBMeters meters;
/**
* Loads a DynamoDB item transformer of the specified class.
*
* @param clazz The class. Must not be {@code null}.
*
* @return The DynamoDB item transformer.
*/
private DynamoDBItemTransformer<K, V> loadItemTransformerClass(final Class<?> clazz) {
try {
@SuppressWarnings("unchecked")
Class<DynamoDBItemTransformer<K, V>> genClazz = (Class<DynamoDBItemTransformer<K, V>>) clazz;
return genClazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new PersistenceException("Couldn't load DynamoDB item transformer class: " + e.getMessage(), e);
}
}
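// Illustrative note: the transformer class named in the store configuration is
// instantiated reflectively above, so it must be public and expose a public no-arg
// constructor. A hypothetical skeleton (class name and type parameters are examples
// only, assuming the transformer interface is parameterised by key and value type):
//
//   public class SessionItemTransformer implements DynamoDBItemTransformer<String, byte[]> {
//       public SessionItemTransformer() { } // required for reflective instantiation
//       // ... map DynamoDB Item instances to / from InfinispanEntry instances ...
//   }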
/**
* Loads a DynamoDB query executor of the specified class.
*
* @param clazz The class. Must not be {@code null}.
*
* @return The DynamoDB query executor.
*/
private DynamoDBQueryExecutor<K, V> loadQueryExecutorClass(final Class<?> clazz) {
try {
@SuppressWarnings("unchecked")
Class<DynamoDBQueryExecutor<K, V>> genClazz = (Class<DynamoDBQueryExecutor<K, V>>) clazz;
return genClazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new PersistenceException("Couldn't load DynamoDB query executor class: " + e.getMessage(), e);
}
}
/**
* Returns the DynamoDB store configuration.
*
* @return The DynamoDB store configuration, {@code null} if not
* initialised.
*/
public DynamoDBStoreConfiguration getConfiguration() {
return config;
}
/**
* Returns the underlying DynamoDB table.
*
* @return The DynamoDB table, {@code null} if not initialised.
*/
public Table getTable() {
return table;
}
/**
* Returns the configured DynamoDB item transformer.
*
* @return The DynamoDB item transformer, {@code null} if not
* initialised.
*/
public DynamoDBItemTransformer<K, V> getItemTransformer() {
return itemTransformer;
}
@Override
public QueryExecutor getQueryExecutor() {
return queryExecutor;
}
/**
* Returns the DynamoDB meters.
*
* @return The meters, {@code null} if not initialised.
*/
public DynamoDBMeters getMeters() {
return meters;
}
@Override
public void init(final InitializationContext ctx) {
// This method will be invoked by the PersistenceManager during initialization. The InitializationContext
// contains:
// - this CacheLoader's configuration
// - the cache to which this loader is applied. Your loader might want to use the cache's name to construct
// cache-specific identifiers
// - the StreamingMarshaller that needs to be used to marshall/unmarshall the entries
// - a TimeService which the loader can use to determine expired entries
// - a ByteBufferFactory which needs to be used to construct ByteBuffers
// - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader
super.init(ctx);
this.config = ctx.getConfiguration();
// Log the configuration
Loggers.MAIN_LOG.info("[DS0100] DynamoDB store: Infinispan cache store configuration for {}:", getCacheName());
config.log();
// Create a DynamoDB client
AmazonDynamoDBClientBuilder builder = AmazonDynamoDBClientBuilder
.standard()
.withCredentials(DefaultAWSCredentialsProviderChain.getInstance());
if (config.getEndpoint() != null && ! config.getEndpoint().trim().isEmpty()) {
builder = builder.withEndpointConfiguration(
new AwsClientBuilder.EndpointConfiguration(
config.getEndpoint(),
null // inferred from endpoint
)
);
}
if (config.getRegion() != null) {
builder = builder.withRegion(config.getRegion());
}
if (config.getHTTPProxyHost() != null) {
ClientConfiguration clientConfig = new ClientConfiguration().withProxyHost(config.getHTTPProxyHost());
if (config.getHTTPProxyPort() > -1) {
clientConfig.withProxyPort(config.getHTTPProxyPort());
}
builder = builder.withClientConfiguration(clientConfig);
}
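// Example (not from this module): the same builder API can point the store at DynamoDB
// Local for development; the endpoint URL, region and proxy values below are placeholders.
// Note that the AWS SDK accepts either an endpoint configuration or a region, not both.
//
//   AmazonDynamoDB local = AmazonDynamoDBClientBuilder.standard()
//       .withCredentials(DefaultAWSCredentialsProviderChain.getInstance())
//       .withEndpointConfiguration(
//           new AwsClientBuilder.EndpointConfiguration("http://localhost:8000", "eu-central-1"))
//       .withClientConfiguration(
//           new ClientConfiguration().withProxyHost("proxy.example.com").withProxyPort(8080))
//       .build();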
try {
itemHMAC = new ItemHMAC(config.getHMACSHA256Key());
} catch (InvalidKeyException e) {
throw new PersistenceException(e.getMessage(), e);
}
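// Example (not from this module): a suitable HMAC SHA-256 key can be produced with the
// standard JCA API; how the key reaches getHMACSHA256Key() depends on the configuration.
//
//   javax.crypto.SecretKey hmacKey =
//       javax.crypto.KeyGenerator.getInstance("HmacSHA256").generateKey();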
client = builder.build();
Loggers.MAIN_LOG.info("[DS0140] DynamoDB store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());
// Load and initialise the DynamoDB item transformer
Loggers.MAIN_LOG.debug("[DS0101] Loading DynamoDB item transformer class {} for cache {}...",
config.getItemTransformerClass(),
getCacheName());
itemTransformer = loadItemTransformerClass(config.getItemTransformerClass());
itemTransformer.init(() -> config.isEnableTTL());
requestFactory = new RequestFactory<>(
itemTransformer,
config.getIndexAttributes(),
config.useConsistentReads(),
config.getProvisionedThroughputForCreateTable(),
config.isCreateTableWithEncryptionAtRest(),
config.isEnableDeletionProtection(),
config.getTablePrefix(),
config.getRangeKeyToApply(),
config.getRangeKeyValue(),
config.isCreateTableWithStream());
table = new DynamoDB(client).getTable(requestFactory.getTableName());
// Load and initialise the optional query executor
if (config.getQueryExecutorClass() != null) {
Loggers.MAIN_LOG.debug("[DS0130] Loading optional DynamoDB query executor class {} for cache {}...",
config.getQueryExecutorClass(),
getCacheName());
queryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());
queryExecutor.init(new DynamoDBQueryExecutorInitContext<>() {
@Override
public DynamoDBItemTransformer<K, V> getDynamoDBItemTransformer() {
return itemTransformer;
}
@Override
public Table getDynamoDBTable() {
return table;
}
@Override
public Index getDynamoDBIndex(final String attributeName) {
if (config.getIndexAttributes() != null && config.getIndexAttributes().contains(attributeName)) {
return table.getIndex(requestFactory.getGSIName(attributeName));
} else {
return null;
}
}
});
}
marshalledEntryFactory = ctx.getMarshallableEntryFactory();
final String metricsPrefix = ctx.getCache().getName() + ".";
if (config.getMetricRegistry() == null)
meters = new DynamoDBMeters(metricsPrefix);
else
meters = new DynamoDBMeters(metricsPrefix, config.getMetricRegistry());
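// Example (not from this module): when a shared Dropwizard registry is supplied through
// the configuration, an application can inspect the store's timers under the cache-name
// prefix, e.g.
//
//   com.codahale.metrics.MetricRegistry registry = new com.codahale.metrics.MetricRegistry();
//   // ... pass the registry to the store configuration, run the cache, then:
//   registry.getTimers().forEach((name, timer) ->
//       System.out.println(name + " p99(ns)=" + timer.getSnapshot().get99thPercentile()));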
Loggers.MAIN_LOG.info("[DS0102] Initialized DynamoDB store for cache {} with table {}",
getCacheName(),
table.getTableName());
}
@Override
public void start() {
// This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
// is complete and the loader can perform operations such as opening a connection to the external storage,
// initialize internal data structures, etc.
CreateTableRequest ctr = requestFactory.resolveCreateTableRequest();
try {
table = new DynamoDB(client)
.createTable(
ctr.withProvisionedThroughput(config.getProvisionedThroughputForCreateTable())
);
Loggers.MAIN_LOG.info("[DS0129] DynamoDB store: Created table {} for cache {}", table.getTableName(), getCacheName());
} catch (ResourceInUseException e) {
// table exists
Loggers.MAIN_LOG.info("[DS0133] DynamoDB store: Table {} for cache {} exists", table.getTableName(), getCacheName());
} catch (Exception e) {
Loggers.MAIN_LOG.fatal("[DS0103] DynamoDB store: Couldn't create table {} with {}: {}: {}", table.getTableName(), ctr, e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
try {
table.waitForActive();
} catch (InterruptedException e) {
throw new PersistenceException("Interrupted while awaiting DynamoDB table " + table.getTableName() + " to become active: " + e.getMessage(), e);
}
Loggers.MAIN_LOG.info("[DS0141] DynamoDB store: Table properties: {}", table.getDescription());
if (config.isEnableDeletionProtection() && ! table.getDescription().getDeletionProtectionEnabled()) {
Loggers.MAIN_LOG.info("[DS0146] DynamoDB store: Enabling deletion protection for table {}", table.getTableName());
client.updateTable(new UpdateTableRequest().withTableName(table.getTableName()).withDeletionProtectionEnabled(true));
}
ContinuousBackupsStatus contBackupStatus = null;
PointInTimeRecoveryStatus pitrStatus = null;
try {
// Not available on localhost DynamoDB
DescribeContinuousBackupsResult result = client.describeContinuousBackups(new DescribeContinuousBackupsRequest().withTableName(table.getTableName()));
contBackupStatus = ContinuousBackupsStatus.fromValue(result.getContinuousBackupsDescription().getContinuousBackupsStatus());
pitrStatus = PointInTimeRecoveryStatus.fromValue(result.getContinuousBackupsDescription().getPointInTimeRecoveryDescription().getPointInTimeRecoveryStatus());
Loggers.MAIN_LOG.info("[DS0143] DynamoDB store: Continuous backup status for table {}: {}", table.getTableName(), contBackupStatus);
Loggers.MAIN_LOG.info("[DS0144] DynamoDB store: Point in time recovery status for table {}: {}", table.getTableName(), pitrStatus);
} catch (Exception e) {
Loggers.MAIN_LOG.error("[DS0145] DynamoDB store: Couldn't obtain continuous backup status for table {}: {}", table.getTableName(), e.getMessage());
}
if ( (ContinuousBackupsStatus.DISABLED.equals(contBackupStatus) || PointInTimeRecoveryStatus.DISABLED.equals(pitrStatus))
&& config.isEnableContinuousBackups()) {
Loggers.MAIN_LOG.info("[DS0142] DynamoDB store: Enabling continuous backups / PITR for table {}", table.getTableName());
boolean contBackupEnabled = false;
int retryCount = 0;
do {
// Wait extra after table creation to prevent potential
// ContinuousBackupsUnavailableException: Backups are being enabled for the table: [table-name].
// Please retry later (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: ContinuousBackupsUnavailableException; Request ID: [id]; Proxy: null)
try {
TimeUnit.SECONDS.sleep(1 + retryCount);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
try {
client.updateContinuousBackups(
new UpdateContinuousBackupsRequest()
.withTableName(table.getTableName())
.withPointInTimeRecoverySpecification(
new PointInTimeRecoverySpecification()
.withPointInTimeRecoveryEnabled(true)));
contBackupEnabled = true; // success
} catch (Exception e) {
if (e instanceof ContinuousBackupsUnavailableException && retryCount++ < 5) {
String msg = "Couldn't set up continuous backups for table " + table.getTableName() + ", retrying: " + e.getMessage();
Loggers.MAIN_LOG.info("[DS0104] DynamoDB store: {}", msg);
continue;
}
String msg = "Couldn't set up continuous backups for table " + table.getTableName() + ": " + e.getMessage();
Loggers.MAIN_LOG.fatal("[DS0104] DynamoDB store: {}", msg);
throw new PersistenceException(msg, e);
}
} while (! contBackupEnabled);
}
TimeToLiveStatus currentTTLStatus = null;
try {
// Not available on localhost DynamoDB
DescribeTimeToLiveResult result = client.describeTimeToLive(new DescribeTimeToLiveRequest().withTableName(table.getTableName()));
currentTTLStatus = TimeToLiveStatus.fromValue(result.getTimeToLiveDescription().getTimeToLiveStatus());
Loggers.MAIN_LOG.info("[DS0161] DynamoDB store: TTL for table {}: status={} attribute={}",
table.getTableName(),
currentTTLStatus,
result.getTimeToLiveDescription().getAttributeName());
} catch (Exception e) {
Loggers.MAIN_LOG.error("[DS0162] DynamoDB store: Couldn't obtain TTL status for table {}: {}", table.getTableName(), e.getMessage());
}
if (TimeToLiveStatus.DISABLED.equals(currentTTLStatus) && config.isEnableTTL() && requestFactory.getItemTransformer().getTTLAttributeName() != null) {
Loggers.MAIN_LOG.info("[DS0163] DynamoDB store: Enabling TTL for table {}", table.getTableName());
try {
client.updateTimeToLive(
new UpdateTimeToLiveRequest()
.withTableName(table.getTableName())
.withTimeToLiveSpecification(
new TimeToLiveSpecification()
.withAttributeName(requestFactory.getItemTransformer().getTTLAttributeName())
.withEnabled(true)));
} catch (Exception e) {
String msg = "Couldn't set up TTL for table " + table.getTableName() + ": " + e.getMessage();
Loggers.MAIN_LOG.fatal("[DS0160] DynamoDB store: {}", msg);
throw new PersistenceException(msg, e);
}
}
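// Note: DynamoDB TTL expects the designated attribute to hold the expiry time as a Number
// of epoch seconds, and deletion happens in the background, so expired items may still be
// returned by reads until DynamoDB removes them (hence the isExpired checks in loadEntry
// and entryPublisher). Sketch of how such a value is typically derived (illustrative only;
// the attribute name is an example):
//
//   long ttlEpochSeconds = Instant.now().plusMillis(lifespanMillis).getEpochSecond();
//   item.withLong("ttl", ttlEpochSeconds);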
reaper = new ExpiredEntryReaper<>(
marshalledEntryFactory,
table,
requestFactory,
config.getPurgeMaxReadCapacity(),
config.getPurgeLimit(),
meters.purgeTimer,
meters.deleteTimer);
Loggers.MAIN_LOG.info("[DS0134] Resolved absolute purge max read capacity: {}", reaper.resolveAbsolutePurgeMaxReadCapacity());
Loggers.MAIN_LOG.info("[DS0104] Started DynamoDB external store connector for cache {} with table {}", getCacheName(), table.getTableName());
}
@Override
public void stop() {
super.stop();
if (client != null) {
client.shutdown();
}
Loggers.MAIN_LOG.info("[DS0105] Stopped DynamoDB store connector for cache {}", getCacheName());
}
@Override
public boolean contains(final Object key) {
// This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
// The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
// from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
// so that its existence can be determined as quickly as possible.
//
// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.DYNAMODB_LOG.trace("[DS0106] DynamoDB store: Checking {} cache key {}", getCacheName(), key);
try (Timer.Context timerCtx = meters.getTimer.time()) {
return table.getItem(requestFactory.resolveGetItemSpec(key)) != null;
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0107] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
}
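// Example (not from this module): the existence check can minimise transferred data by
// projecting only the key attribute; the attribute name "k" below is a placeholder for
// the table's actual hash key.
//
//   GetItemSpec existsSpec = new GetItemSpec()
//       .withPrimaryKey("k", key)
//       .withProjectionExpression("k")
//       .withConsistentRead(true);
//   boolean exists = table.getItem(existsSpec) != null;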
@Override
public MarshallableEntry<K, V> loadEntry(final Object key) {
Loggers.DYNAMODB_LOG.trace("[DS0108] DynamoDB store: Loading {} cache entry with key {}", getCacheName(), key);
GetItemSpec getItemSpec = requestFactory.resolveGetItemSpec(key);
Item item;
try (Timer.Context timerCtx = meters.getTimer.time()) {
item = table.getItem(getItemSpec);
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0109] {}, {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
if (item == null) {
// Not found
Loggers.DYNAMODB_LOG.trace("[DS0110] DynamoDB store: Item with key {} not found", key);
return null;
}
Loggers.DYNAMODB_LOG.trace("[DS0111] DynamoDB store: Retrieved {} cache item: {}", this::getCacheName, item::toJSON);
try {
item = itemHMAC.verify(item);
} catch (InvalidHMACException e) {
meters.invalidItemHmacCounter.inc();
Loggers.DYNAMODB_LOG.error("[DS0131] DynamoDB store: Invalid item HMAC in {}: {}", getCacheName(), e.getMessage());
throw new PersistenceException(e.getMessage(), e);
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
throw new PersistenceException(e.getMessage(), e);
}
// Transform DynamoDB entry to Infinispan entry
InfinispanEntry<K, V> infinispanEntry = itemTransformer.toInfinispanEntry(item);
if (infinispanEntry.isExpired()) {
Loggers.DYNAMODB_LOG.trace("[DS0114] DynamoDB store: Item with key {} expired", key);
return null;
}
return marshalledEntryFactory.create(
infinispanEntry.getKey(),
infinispanEntry.getValue(),
infinispanEntry.getMetadata(),
null,
infinispanEntry.created(),
infinispanEntry.lastUsed()
);
}
@Override
public boolean delete(final Object key) {
// The CacheWriter should remove from the external storage the entry identified by the specified key.
// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.DYNAMODB_LOG.trace("[DS0112] DynamoDB store: Deleting {} cache entry with key {}", getCacheName(), key);
final boolean deleted;
try (Timer.Context timerCtx = meters.deleteTimer.time()) {
deleted = table.deleteItem(requestFactory.resolveDeleteItemSpec(key)).getItem() != null;
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0113] {}, {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
if (deleted) {
Loggers.DYNAMODB_LOG.trace("[DS0116] DynamoDB store: Deleted {} cache item with key {}", getCacheName(), key);
}
return deleted;
}
@Override
public void write(final MarshallableEntry<? extends K, ? extends V> entry) {
// The CacheWriter should write the specified entry to the external storage.
//
// The PersistenceManager uses MarshalledEntry as the default format so that CacheWriters can efficiently store data coming
// from a remote node, thus avoiding any additional transformation steps.
//
// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.DYNAMODB_LOG.trace("[DS0115] DynamoDB store: Writing {} cache entry {}", getCacheName(), entry);
try (Timer.Context timerCtx = meters.putTimer.time()) {
Item item = requestFactory.resolveItem(
new InfinispanEntry<>(
entry.getKey(),
entry.getValue(),
new InternalMetadataBuilder()
.created(entry.created())
.lastUsed(entry.lastUsed())
.lifespan(entry.getMetadata() != null ? entry.getMetadata().lifespan() : -1L)
.maxIdle(entry.getMetadata() != null ? entry.getMetadata().maxIdle() : -1L)
.build()));
item = itemHMAC.apply(item);
Loggers.DYNAMODB_LOG.trace("[DS0132] DynamoDB store: Writing {} cache item: {}", this::getCacheName, item::toJSON);
table.putItem(item);
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0117] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
}
@Override
public Publisher<MarshallableEntry<K, V>> entryPublisher(final Predicate<? super K> filter, final boolean fetchValue, final boolean fetchMetadata) {
Loggers.DYNAMODB_LOG.trace("[DS0118] DynamoDB store: Processing key filter for {} cache: fetchValue={} fetchMetadata={}",
getCacheName(), fetchValue, fetchMetadata);
final Instant now = Instant.now();
return Flowable.using(meters.processTimer::time,
ignore -> Flowable.fromIterable(requestFactory.getAllItems(table))
.map(itemTransformer::toInfinispanEntry)
.filter(infinispanEntry -> filter == null || filter.test(infinispanEntry.getKey()))
.filter(infinispanEntry -> ! infinispanEntry.isExpired(now))
.map(infinispanEntry -> marshalledEntryFactory.create(
infinispanEntry.getKey(),
infinispanEntry.getValue(),
infinispanEntry.getMetadata(),
PrivateMetadata.empty(),
infinispanEntry.created(),
infinispanEntry.lastUsed()
))
.doOnError(e -> Loggers.DYNAMODB_LOG.error("[DS0119] {}: {}", e.getMessage(), e)),
Timer.Context::stop);
}
@Override
public int size() {
// Infinispan code analysis on 8.2 and 9.4.11 shows that this method is never called in practice, and
// is not wired to the data / cache container API
// TODO inaccurate when a range key is applied!
Loggers.DYNAMODB_LOG.trace("[DS0120] DynamoDB store: Counting {} cache items", getCacheName());
final int count;
try {
count = table.describe().getItemCount().intValue();
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0121] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
Loggers.DYNAMODB_LOG.trace("[DS0122] DynamoDB store: Reported approximately {} {} items", count, getCacheName());
return count;
}
@Override
public void clear() {
Loggers.DYNAMODB_LOG.trace("[DS0123] DynamoDB store: Clearing {} items", getCacheName());
if (requestFactory.getRangeKeyResolvedName() != null) {
throw new PersistenceException("DynamoDB clear operation not supported with applied range key");
}
try {
DeleteTableResult result = table.delete();
int numDeleted = result.getTableDescription().getItemCount().intValue();
Loggers.DYNAMODB_LOG.info("[DS0125] DynamoDB store: Cleared {} {} items", numDeleted, table.getTableName());
table.waitForDelete();
client.createTable(requestFactory.resolveCreateTableRequest());
table.waitForActive();
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0124] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
}
@Override
public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {
Loggers.DYNAMODB_LOG.trace("[DS0126] DynamoDB store: Purging {} cache entries", getCacheName());
try {
executor.execute(() -> reaper.purge(purgeListener));
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0127] {}: {}", e.getMessage(), e);
throw new PersistenceException("Purge exception: " + e.getMessage(), e);
}
}
@Override
public void purge(final Executor executor, final ExpirationPurgeListener<K, V> purgeListener) {
Loggers.DYNAMODB_LOG.trace("[DS0150] DynamoDB store: Purging {} cache entries", getCacheName());
try {
executor.execute(() -> reaper.purgeExtended(purgeListener));
} catch (Exception e) {
Loggers.DYNAMODB_LOG.error("[DS0151] {}: {}", e.getMessage(), e);
throw new PersistenceException("Purge exception: " + e.getMessage(), e);
}
}
}