package com.nimbusds.infinispan.persistence.sql;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import javax.sql.DataSource;
import static org.jooq.impl.DSL.table;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.nimbusds.common.monitor.MonitorRegistries;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.filter.KeyFilter;
import org.infinispan.marshall.core.MarshalledEntry;
import org.infinispan.marshall.core.MarshalledEntryFactory;
import org.infinispan.persistence.TaskContextImpl;
import org.infinispan.persistence.spi.AdvancedCacheLoader.CacheLoaderTask;
import org.infinispan.persistence.spi.AdvancedCacheLoader.TaskContext;
import org.infinispan.persistence.spi.AdvancedCacheWriter.PurgeListener;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.jooq.DSLContext;
import org.jooq.Merge;
import org.jooq.Record;
import org.jooq.SQLDialect;
import org.jooq.conf.RenderNameStyle;
import org.jooq.conf.Settings;
import org.jooq.impl.DSL;
import org.kohsuke.MetaInfServices;
/**
* SQL store for Infinispan 8.2+ caches and maps.
*/
@ThreadSafe
@MetaInfServices
@ConfiguredBy(SQLStoreConfiguration.class)
public class SQLStore<K,V> extends InfinispanStore<K,V> {
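/*
 * Typical programmatic wiring (a minimal sketch; SQLStoreConfigurationBuilder
 * and the exact cache manager calls are assumptions, not confirmed by this
 * file):
 *
 *   ConfigurationBuilder builder = new ConfigurationBuilder();
 *   builder.persistence()
 *          .addStore(SQLStoreConfigurationBuilder.class)
 *          .withProperties(storeProperties); // Hikari pool + store settings
 *   cacheManager.defineConfiguration("myCache", builder.build());
 *   Cache<String, String> cache = cacheManager.getCache("myCache");
 */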
/**
* The SQL store configuration.
*/
private SQLStoreConfiguration config;
/**
* The Hikari SQL data source (with connection pool).
*/
private HikariDataSource dataSource;
/**
 * The jOOQ DSL context wrapping the SQL data source.
 * See http://stackoverflow.com/a/31389342/429425
 */
private DSLContext sql;
/**
* The SQL record transformer (to / from Infinispan entries).
*/
private SQLRecordTransformer<K,V> sqlRecordTransformer;
/**
* The optional SQL query executor.
*/
private SQLQueryExecutor<K,V> sqlQueryExecutor;
/**
* The marshalled Infinispan entry factory.
*/
private MarshalledEntryFactory<K,V> marshalledEntryFactory;
/**
* Purges expired entries found in the SQL store, as indicated by
* their persisted metadata (optional, may be ignored / not stored).
*/
private ExpiredEntryReaper<K,V> reaper;
/**
* jOOQ query fixes.
*/
private JOOQFixes jooqFixes;
/**
 * Instantiates an SQL record transformer from the specified class.
 *
 * @param clazz The class. Must not be {@code null}.
 *
 * @return The SQL record transformer.
 */
@SuppressWarnings("unchecked")
private SQLRecordTransformer<K,V> loadRecordTransformerClass(final Class<?> clazz) {
try {
Class<SQLRecordTransformer<K,V>> genClazz = (Class<SQLRecordTransformer<K,V>>)clazz;
return genClazz.newInstance();
} catch (Exception e) {
throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
}
}
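/*
 * Note: the transformer is instantiated reflectively via newInstance(), so
 * implementations must be public classes with a public no-argument
 * constructor. A hypothetical implementation outline:
 *
 *   public class UserRecordTransformer implements SQLRecordTransformer<String, User> {
 *       public UserRecordTransformer() { } // required for reflective loading
 *       // ... map User objects to / from SQL records
 *   }
 */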
/**
 * Instantiates an SQL query executor from the specified class.
 *
 * @param clazz The class. Must not be {@code null}.
 *
 * @return The SQL query executor.
 */
@SuppressWarnings("unchecked")
private SQLQueryExecutor<K,V> loadQueryExecutorClass(final Class<?> clazz) {
try {
Class<SQLQueryExecutor<K,V>> genClazz = (Class<SQLQueryExecutor<K,V>>)clazz;
return genClazz.newInstance();
} catch (Exception e) {
throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
}
}
/**
* Returns the SQL store configuration.
*
* @return The SQL store configuration, {@code null} if not
* initialised.
*/
public SQLStoreConfiguration getConfiguration() {
return config;
}
/**
* Returns the underlying SQL data source.
*
* @return The underlying SQL data source, {@code null} if not
* initialised.
*/
public DataSource getDataSource() {
return dataSource;
}
@Override
@SuppressWarnings("unchecked")
public void init(final InitializationContext ctx) {
// This method will be invoked by the PersistenceManager during initialization. The InitializationContext
// contains:
// - this CacheLoader's configuration
// - the cache to which this loader is applied. Your loader might want to use the cache's name to construct
// cache-specific identifiers
// - the StreamingMarshaller that needs to be used to marshall/unmarshall the entries
// - a TimeService which the loader can use to determine expired entries
// - a ByteBufferFactory which needs to be used to construct ByteBuffers
// - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader
super.init(ctx);
this.config = ctx.getConfiguration();
Loggers.MAIN_LOG.info("[IS0100] SQL store: Infinispan cache store configuration for {}:", getCacheName());
config.log();
Loggers.MAIN_LOG.info("[IS0140] SQL store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());
// Load and initialise the SQL record transformer
Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
config.getRecordTransformerClass(),
getCacheName());
sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
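// The transformer init context is supplied as a lambda exposing the
// configured SQL dialect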
sqlRecordTransformer.init(() -> config.getSQLDialect());
jooqFixes = new JOOQFixes(config.getSQLDialect(), sqlRecordTransformer.getCreateTableStatement());
// Load and initialise the optional SQL query executor
if (config.getQueryExecutorClass() != null) {
Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
config.getQueryExecutorClass(),
getCacheName());
sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());
sqlQueryExecutor.init(new SQLQueryExecutorInitContext<K,V>() {
@Override
public DataSource getDataSource() {
return dataSource;
}
@Override
public SQLRecordTransformer<K,V> getSQLRecordTransformer() {
return sqlRecordTransformer;
}
@Override
public SQLDialect getSQLDialect() {
return config.getSQLDialect();
}
});
}
marshalledEntryFactory = (MarshalledEntryFactory<K,V>)ctx.getMarshalledEntryFactory();
Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
getCacheName(),
sqlRecordTransformer.getTableName());
}
@Override
public QueryExecutor<K,V> getQueryExecutor() {
return sqlQueryExecutor;
}
@Override
public void start() {
// This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
// is complete and the loader can perform operations such as opening a connection to the external storage,
// initialize internal data structures, etc.
Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
HikariPoolName poolName = HikariPoolName.setDefaultPoolName(hikariProps, getCacheName());
HikariConfig hikariConfig = new HikariConfig(hikariProps);
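// A minimal, hypothetical set of pool properties consumed here (standard
// HikariCP keys; any non-Hikari keys were stripped above):
//
//   jdbcUrl         = jdbc:h2:mem:test
//   username        = sa
//   maximumPoolSize = 10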
MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
if (HikariConfigUtils.metricsAlreadyRegistered(poolName, metricRegistry)) {
Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
} else {
hikariConfig.setMetricRegistry(metricRegistry);
}
HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
if (HikariConfigUtils.healthChecksAlreadyRegistered(poolName, healthCheckRegistry)) {
Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
} else {
hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
}
dataSource = new HikariDataSource(hikariConfig);
// Init jOOQ SQL context
Settings jooqSettings = new Settings();
if (config.getSQLDialect().equals(SQLDialect.H2)) {
// Quoted column names occasionally cause problems in H2
jooqSettings.setRenderNameStyle(RenderNameStyle.AS_IS);
}
sql = DSL.using(dataSource, config.getSQLDialect(), jooqSettings);
// Create table if missing
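// The transformer supplies the dialect-specific DDL, e.g. for a
// hypothetical "users" table keyed on "id":
//
//   CREATE TABLE IF NOT EXISTS users (id VARCHAR(255) PRIMARY KEY, name VARCHAR(255))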
try {
int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());
if (rows > 0) {
Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
}
} catch (Exception e) {
Loggers.MAIN_LOG.fatal("[IS0103] SQL store: Create table if not exists failed: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());
reaper = new ExpiredEntryReaper<>(sqlRecordTransformer);
}
@Override
public void stop() {
super.stop();
dataSource.close();
Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}", getCacheName());
}
@SuppressWarnings("unchecked")
private K resolveKey(final Object key) {
if (key instanceof byte[]) {
throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
}
return (K)key;
}
@Override
public boolean contains(final Object key) {
// This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
// The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
// from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
// so that its existence can be determined as quickly as possible.
//
// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);
try {
return sql.selectOne()
.from(table(sqlRecordTransformer.getTableName()))
.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
.fetchOne() != null;
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
}
@Override
@SuppressWarnings("unchecked")
public MarshalledEntry<K,V> load(final Object key) {
// Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the external storage all
// of the data that is needed to reconstruct the entry in memory, i.e. the value and optionally the metadata. This method
// needs to return a MarshalledEntry which can be constructed as follows:
//
// ctx.getMarshalledEntryFactory().newMarshalledEntry(key, value, metadata);
//
// If the entry does not exist or has expired, this method should return null.
// If an error occurs while retrieving data from the external storage, this method should throw a PersistenceException
//
// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
// If the loader needs to have knowledge of the key/value data beyond their binary representation, then it needs access to the key's and value's
// classes and the marshaller used to encode them.
Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);
final Record record;
try {
record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
.fetchOne();
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0109] {}, {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
if (record == null) {
// Not found
Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
return null;
}
if (Loggers.SQL_LOG.isTraceEnabled()) {
Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
}
// Transform SQL record to Infinispan entry
InfinispanEntry<K,V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);
return marshalledEntryFactory.newMarshalledEntry(
infinispanEntry.getKey(),
infinispanEntry.getValue(),
infinispanEntry.getMetadata());
}
@Override
public boolean delete(final Object key) {
// The CacheWriter should remove from the external storage the entry identified by the specified key.
// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);
int deletedRows;
try {
deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
.execute();
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0113] {}, {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record with key {}", deletedRows, key);
if (deletedRows == 1) {
return true;
} else if (deletedRows == 0) {
return false;
} else {
Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
throw new PersistenceException("Too many deleted rows for key " + key);
}
}
@Override
public void write(final MarshalledEntry<? extends K, ? extends V> marshalledEntry) {
// The CacheWriter should write the specified entry to the external storage.
//
// The PersistenceManager uses MarshalledEntry as the default format so that CacheWriters can efficiently store data coming
// from a remote node, thus avoiding any additional transformation steps.
//
// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), marshalledEntry);
try {
SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
new InfinispanEntry<>(
marshalledEntry.getKey(),
marshalledEntry.getValue(),
marshalledEntry.getMetadata()));
// Use H2-style MERGE; jOOQ will adapt it for the particular database
// http://www.jooq.org/doc/3.8/manual/sql-building/sql-statements/merge-statement/
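// For a hypothetical "users" table keyed on "id" the generated statement
// takes the form:
//
//   MERGE INTO users (id, name) KEY (id) VALUES (?, ?)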
Merge mergeStatement = sql.mergeInto(table(sqlRecordTransformer.getTableName()), sqlRecord.getFields().keySet())
.key(sqlRecord.getKeyColumns())
.values(sqlRecord.getFields().values());
String sqlStatement = jooqFixes.fixMergeStatement(mergeStatement);
int rows = sql.execute(sqlStatement);
if (rows != 1) {
if (SQLDialect.MYSQL.equals(config.getSQLDialect()) && rows == 2) {
// MySQL indicates UPDATE on INSERT by returning 2 num rows
return;
}
Loggers.SQL_LOG.error("[IS0116] SQL insert / update for key {} in table {} failed: Rows {}",
marshalledEntry.getKey(), sqlRecordTransformer.getTableName(), rows);
throw new PersistenceException("(Synthetic) SQL MERGE failed: Rows " + rows);
}
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0117] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
}
@Override
public void process(final KeyFilter<? super K> keyFilter,
final CacheLoaderTask<K, V> cacheLoaderTask,
final Executor executor,
final boolean fetchValue,
final boolean fetchMetadata) {
Loggers.SQL_LOG.trace("[IS0118] SQL store: Processing key filter for {} cache: fetchValue={} fetchMetadata={}",
getCacheName(), fetchValue, fetchMetadata);
final TaskContext taskContext = new TaskContextImpl();
// TODO consider multi-threaded SQL retrieval?
executor.execute(() -> {
try {
// Retrieves entire entry, fetchValue / fetchMetadata params are ignored TODO reconsider
sql.selectFrom(table(sqlRecordTransformer.getTableName()))
.fetch()
.forEach(record -> {
if (taskContext.isStopped()) {
return;
}
InfinispanEntry<K,V> infinispanEntry = sqlRecordTransformer.toInfinispanEntry(record);
if (keyFilter.accept(infinispanEntry.getKey())) {
MarshalledEntry<K,V> marshalledEntry = marshalledEntryFactory.newMarshalledEntry(
infinispanEntry.getKey(),
infinispanEntry.getValue(),
infinispanEntry.getMetadata());
try {
cacheLoaderTask.processEntry(marshalledEntry, taskContext);
} catch (InterruptedException e) {
throw new PersistenceException(e.getMessage(), e);
}
}
});
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0119] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
});
}
@Override
public int size() {
// Infinispan code analysis on 8.2 shows that this method is never called in practice, and
// is not wired to the data / cache container API
Loggers.SQL_LOG.trace("[IS0120] SQL store: Counting {} records", getCacheName());
final int count;
try {
count = sql.fetchCount(table(sqlRecordTransformer.getTableName()));
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0121] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
Loggers.SQL_LOG.trace("[IS0122] SQL store: Counted {} {} records", count, getCacheName());
return count;
}
@Override
public void clear() {
Loggers.SQL_LOG.trace("[IS0123] SQL store: Clearing {} records", getCacheName());
int numDeleted;
try {
numDeleted = sql.deleteFrom(table(sqlRecordTransformer.getTableName())).execute();
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0124] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
}
Loggers.SQL_LOG.info("[IS0125] SQL store: Cleared {} {} records", numDeleted, sqlRecordTransformer.getTableName());
}
@Override
public void purge(final Executor executor, final PurgeListener<? super K> purgeListener) {
Loggers.SQL_LOG.trace("[IS0126] SQL store: Purging {} cache entries", getCacheName());
final AtomicInteger numPurged = new AtomicInteger();
try {
executor.execute(() -> numPurged.set(reaper.purge(sql, purgeListener)));
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0127] {}: {}", e.getMessage(), e);
throw new PersistenceException("Purge exception: " + e.getMessage(), e);
}
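// Note: the purge task runs asynchronously on the supplied executor, so
// the count logged here may be read before the reaper has finished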
Loggers.SQL_LOG.debug("[IS0128] SQL store: Purged {} expired {} cache entries", numPurged.get(), getCacheName());
}
}