package com.nimbusds.infinispan.persistence.sql;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.health.HealthCheckRegistry;
import com.nimbusds.common.monitor.MonitorRegistries;
import com.nimbusds.infinispan.persistence.common.InfinispanEntry;
import com.nimbusds.infinispan.persistence.common.InfinispanStore;
import com.nimbusds.infinispan.persistence.common.InternalMetadataBuilder;
import com.nimbusds.infinispan.persistence.common.query.QueryExecutor;
import com.nimbusds.infinispan.persistence.sql.config.SQLStoreConfiguration;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutor;
import com.nimbusds.infinispan.persistence.sql.query.SQLQueryExecutorInitContext;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.persistence.Store;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.PersistenceException;
import org.jooq.DSLContext;
import org.jooq.Merge;
import org.jooq.Record;
import org.jooq.SQLDialect;
import org.jooq.conf.RenderNameStyle;
import org.jooq.conf.Settings;
import org.jooq.impl.DSL;
import org.kohsuke.MetaInfServices;
import org.reactivestreams.Publisher;
import javax.sql.DataSource;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import static org.jooq.impl.DSL.table;
/**
* SQL store for Infinispan caches and maps.
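*
* <p>Example programmatic setup (a minimal sketch; {@code
* SQLStoreConfigurationBuilder} and the option methods shown are assumptions
* inferred from this store's configuration getters, check the store
* documentation for the exact API):
*
* <pre>{@code
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.persistence()
*        .addStore(SQLStoreConfigurationBuilder.class)          // assumed builder class
*        .recordTransformerClass(MyRecordTransformer.class)     // hypothetical transformer
*        .createTableIfMissing(true);
* }</pre>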
*/
@ThreadSafe
@MetaInfServices
@ConfiguredBy(SQLStoreConfiguration.class)
@Store(shared = true)
public class SQLStore<K, V> extends InfinispanStore<K, V> {
/**
* The supported databases.
*/
public static final List<SQLDialect> SUPPORTED_DATABASES = List.of(
SQLDialect.H2, SQLDialect.MYSQL, SQLDialect.POSTGRES_9_5, SQLDialect.SQLSERVER2016, SQLDialect.ORACLE
);
/**
* The SQL store configuration.
*/
private SQLStoreConfiguration config;
/**
* Enables sharing of the Hikari SQL data sources.
*/
private static final DataSources SHARED_DATA_SOURCES = new DataSources();
/**
* The Hikari SQL data source (with connection pool).
*/
private HikariDataSource dataSource;
/**
* Wraps the SQL data source with jOOQ.
* See http://stackoverflow.com/a/31389342/429425
*/
private DSLContext sql;
/**
* The SQL record transformer (to / from Infinispan entries).
*/
private SQLRecordTransformer<K, V> sqlRecordTransformer;
/**
* The optional SQL query executor.
*/
private SQLQueryExecutor<K, V> sqlQueryExecutor;
/**
* The marshalled Infinispan entry factory.
*/
private MarshallableEntryFactory<K, V> marshallableEntryFactory;
/**
* Purges expired entries found in the SQL store, as indicated by
* their persisted metadata (optional, may be ignored / not stored).
*/
private ExpiredEntryReaper<K, V> reaper;
/**
* SQL operation timers.
*/
private SQLTimers timers;
/**
* jOOQ query fixes.
*/
private JOOQFixes jooqFixes;
/**
* Loads an SQL record transformer with the specified class.
*
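* <p>The class must implement {@code SQLRecordTransformer} and provide a
* public no-argument constructor, e.g. (a minimal sketch; the method set is
* abbreviated to names that appear in this store, exact signatures omitted):
*
* <pre>{@code
* public class MyTransformer implements SQLRecordTransformer<String, String> {
*
*     public MyTransformer() { }
*
*     // getTableName(), getCreateTableStatement(), toSQLRecord(...),
*     // toInfinispanEntry(...), resolveSelectionConditions(...), etc.
* }
* }</pre>
*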
* @param clazz The class. Must not be {@code null}.
*
* @return The SQL record transformer.
*/
@SuppressWarnings( "unchecked" )
private SQLRecordTransformer<K, V> loadRecordTransformerClass(final Class<?> clazz) {
try {
Class<? extends SQLRecordTransformer<K, V>> genClazz = (Class<? extends SQLRecordTransformer<K, V>>) clazz;
return genClazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new PersistenceException("Couldn't load SQL record transformer class: " + e.getMessage(), e);
}
}
/**
* Loads an SQL query executor with the specified class.
*
* @param clazz The class. Must not be {@code null}.
*
* @return The SQL query executor.
*/
@SuppressWarnings( "unchecked" )
private SQLQueryExecutor<K, V> loadQueryExecutorClass(final Class<?> clazz) {
try {
Class<? extends SQLQueryExecutor<K, V>> genClazz = (Class<? extends SQLQueryExecutor<K, V>>) clazz;
return genClazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new PersistenceException("Couldn't load SQL query executor class: " + e.getMessage(), e);
}
}
/**
* Returns the SQL store configuration.
*
* @return The SQL store configuration, {@code null} if not
* initialised.
*/
public SQLStoreConfiguration getConfiguration() {
return config;
}
/**
* Returns the underlying SQL data source.
*
* @return The underlying SQL data source, {@code null} if not
* initialised.
*/
public HikariDataSource getDataSource() {
return dataSource;
}
@Override
public void init(final InitializationContext ctx) {
// This method will be invoked by the PersistenceManager during initialization. The InitializationContext
// contains:
// - this CacheLoader's configuration
// - the cache to which this loader is applied. Your loader might want to use the cache's name to construct
// cache-specific identifiers
// - the StreamingMarshaller that needs to be used to marshall/unmarshall the entries
// - a TimeService which the loader can use to determine expired entries
// - a ByteBufferFactory which needs to be used to construct ByteBuffers
// - a MarshalledEntryFactory which needs to be used to construct entries from the data retrieved by the loader
super.init(ctx);
this.config = ctx.getConfiguration();
Loggers.MAIN_LOG.info("[IS0100] SQL store: Infinispan cache store configuration for {}:", getCacheName());
config.log();
Loggers.MAIN_LOG.info("[IS0140] SQL store: Expiration thread wake up interval for cache {}: {}", getCacheName(),
ctx.getCache().getCacheConfiguration().expiration().wakeUpInterval());
// Load and initialise the SQL record transformer
Loggers.MAIN_LOG.debug("[IS0101] Loading SQL record transformer class {} for cache {}...",
config.getRecordTransformerClass(),
getCacheName());
sqlRecordTransformer = loadRecordTransformerClass(config.getRecordTransformerClass());
sqlRecordTransformer.init(() -> config.getSQLDialect());
jooqFixes = new JOOQFixes(config.getSQLDialect(), sqlRecordTransformer.getCreateTableStatement());
// Load and initialise the optional SQL query executor
if (config.getQueryExecutorClass() != null) {
Loggers.MAIN_LOG.debug("[IS0201] Loading optional SQL query executor class {} for cache {}...",
config.getQueryExecutorClass(),
getCacheName());
sqlQueryExecutor = loadQueryExecutorClass(config.getQueryExecutorClass());
sqlQueryExecutor.init(new SQLQueryExecutorInitContext<>() {
@Override
public DataSource getDataSource() {
return dataSource;
}
@Override
public SQLRecordTransformer getSQLRecordTransformer() {
return sqlRecordTransformer;
}
@Override
public SQLDialect getSQLDialect() {
return config.getSQLDialect();
}
});
}
marshallableEntryFactory = ctx.getMarshallableEntryFactory();
timers = new SQLTimers(ctx.getCache().getName() + ".");
Loggers.MAIN_LOG.info("[IS0102] Initialized SQL external store for cache {} with table {}",
getCacheName(),
sqlRecordTransformer.getTableName());
}
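/**
* Wraps a retrieved jOOQ record. On Oracle the field names are looked up
* in upper case, because Oracle converts unquoted identifiers to upper
* case (e.g. a column created as {@code id} comes back as {@code ID}).
*
* @param record The retrieved jOOQ record. Must not be {@code null}.
*
* @return The wrapped record.
*/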
private RetrievedSQLRecord wrap(final Record record) {
// Prevent retrieval exceptions caused by Oracle's internal
// conversion of all table column names to upper case
final boolean fieldsToUpperCase = SQLDialect.ORACLE.equals(config.getSQLDialect());
return new RetrievedSQLRecordImpl(record, fieldsToUpperCase);
}
/**
* Returns the underlying SQL record transformer.
*
* @return The SQL record transformer, {@code null} if not initialised.
*/
public SQLRecordTransformer<K, V> getSQLRecordTransformer() {
return sqlRecordTransformer;
}
@Override
public QueryExecutor<K, V> getQueryExecutor() {
return sqlQueryExecutor;
}
/**
* Starts the Hikari data source using the existing configuration.
*
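* <p>The pool settings come from the store configuration properties. A
* typical set (standard HikariCP property names; an illustrative sketch,
* not a complete list):
*
* <pre>{@code
* jdbcUrl=jdbc:h2:mem:test
* username=sa
* password=secret
* maximumPoolSize=10
* connectionTimeout=30000
* }</pre>
*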
* @return The data source.
*/
private HikariDataSource startDataSource() {
Properties hikariProps = HikariConfigUtils.removeNonHikariProperties(config.properties());
hikariProps = HikariConfigUtils.removeBlankProperties(hikariProps);
HikariPoolName poolName = HikariPoolName.setDefaultPoolName(hikariProps, getCacheName());
var hikariConfig = new HikariConfig(hikariProps);
MetricRegistry metricRegistry = MonitorRegistries.getMetricRegistry();
if (HikariConfigUtils.metricsAlreadyRegistered(poolName, metricRegistry)) {
Loggers.MAIN_LOG.warn("[IS0130] SQL store: Couldn't register Dropwizard metrics: Existing registered metrics for " + getCacheName());
} else {
hikariConfig.setMetricRegistry(metricRegistry);
}
HealthCheckRegistry healthCheckRegistry = MonitorRegistries.getHealthCheckRegistry();
if (HikariConfigUtils.healthChecksAlreadyRegistered(poolName, healthCheckRegistry)) {
Loggers.MAIN_LOG.warn("[IS0131] SQL store: Couldn't register Dropwizard health checks: Existing registered health checks for " + getCacheName());
} else {
hikariConfig.setHealthCheckRegistry(healthCheckRegistry);
}
return new HikariDataSource(hikariConfig);
}
@Override
public void start() {
// This method will be invoked by the PersistenceManager to start the CacheLoader. At this stage configuration
// is complete and the loader can perform operations such as opening a connection to the external storage,
// initialize internal data structures, etc.
if (config.getConnectionPool() == null) {
// Using own data source
dataSource = startDataSource();
SHARED_DATA_SOURCES.put(getCacheName(), dataSource);
} else {
// Using shared data source
dataSource = SHARED_DATA_SOURCES.get(config.getConnectionPool());
if (dataSource == null) {
// Defer start when connection pool becomes available
SHARED_DATA_SOURCES.deferStart(config.getConnectionPool(), this);
return;
}
}
// Init jOOQ SQL context
var jooqSettings = new Settings();
if (SQLDialect.H2.equals(config.getSQLDialect())) {
// Quoted column names occasionally cause problems in H2
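// AS_IS renders identifiers unquoted (e.g. my_table instead of "my_table")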
jooqSettings.setRenderNameStyle(RenderNameStyle.AS_IS);
}
sql = DSL.using(dataSource, config.getSQLDialect(), jooqSettings);
if (config.createTableIfMissing()) {
try {
Loggers.MAIN_LOG.info("[IS0136] SQL store: Executing create table {} (if missing?) for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
int rows = sql.execute(sqlRecordTransformer.getCreateTableStatement());
if (rows > 0) {
Loggers.MAIN_LOG.info("[IS0129] SQL store: Created table {} for cache {}", sqlRecordTransformer.getTableName(), getCacheName());
} else {
Loggers.MAIN_LOG.info("[IS0129] SQL store: Create table {} (if missing?) for cache {} returned {} changed rows", sqlRecordTransformer.getTableName(), getCacheName(), rows);
}
} catch (Exception e) {
String msg = "[IS0103] SQL store: Create table failed, {}: " + e.getMessage();
if (config.createTableIgnoreErrors()) {
Loggers.MAIN_LOG.warn(msg, "continuing");
} else {
Loggers.MAIN_LOG.fatal(msg, "aborting", e);
throw new PersistenceException(e.getMessage(), e);
}
}
// Alter table?
if (sqlRecordTransformer instanceof SQLTableTransformer) {
Loggers.MAIN_LOG.info("[IS0133] SQL store: Found table transformer");
List<String> transformQueries = ((SQLTableTransformer)sqlRecordTransformer)
.getTransformTableStatements(
SQLTableUtils.getColumnNames(table(sqlRecordTransformer.getTableName()), sql)
);
if (transformQueries != null) {
for (String query: transformQueries) {
Loggers.MAIN_LOG.info("[IS0134] SQL store: Executing table transform for cache {}: {}", getCacheName(), query);
sql.execute(query);
}
}
}
} else {
Loggers.MAIN_LOG.info("[IS0132] SQL store: Skipped create table (if missing?) step");
}
Loggers.MAIN_LOG.info("[IS0104] Started SQL external store connector for cache {} with table {}", getCacheName(), sqlRecordTransformer.getTableName());
if (sqlRecordTransformer.getKeyColumnsForExpiredEntryReaper() != null) {
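// Key columns are available: purge expired entries in pages to bound the per-query result set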
reaper = new ExpiredEntryPagedReaper<>(
marshallableEntryFactory,
sql,
sqlRecordTransformer,
this::wrap,
config.getExpiredQueryPageLimit(),
timers.deleteTimer);
} else {
reaper = new ExpiredEntryReaper<>(
marshallableEntryFactory,
sql,
sqlRecordTransformer,
this::wrap,
timers.deleteTimer);
}
}
@Override
public void stop() {
super.stop();
SHARED_DATA_SOURCES.remove(getCacheName());
if (dataSource != null && config.getConnectionPool() == null) {
dataSource.close();
}
Loggers.MAIN_LOG.info("[IS0105] Stopped SQL store connector for cache {}", getCacheName());
}
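/**
* Casts the raw cache key to the store key type.
*
* @param key The cache key. A {@code byte[]} key indicates the cache
*            operates on marshalled (binary) content and cannot be mapped
*            to SQL columns.
*
* @return The typed key.
*/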
@SuppressWarnings("unchecked")
private K resolveKey(final Object key) {
if (key instanceof byte[]) {
throw new PersistenceException("Cannot resolve " + getCacheName() + " cache key from byte[], enable compatibility mode");
}
return (K)key;
}
@Override
public boolean contains(final Object key) {
// This method will be invoked by the PersistenceManager to determine if the loader contains the specified key.
// The implementation should be as fast as possible, e.g. it should strive to transfer the least amount of data possible
// from the external storage to perform the check. Also, if possible, make sure the field is indexed on the external storage
// so that its existence can be determined as quickly as possible.
//
// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.SQL_LOG.trace("[IS0106] SQL store: Checking {} cache key {}", getCacheName(), key);
Timer.Context timerCtx = timers.loadTimer.time();
try {
return sql.selectOne()
.from(table(sqlRecordTransformer.getTableName()))
.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
.fetchOne() != null;
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0107] {}: {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
} finally {
timerCtx.stop();
}
}
@Override
public MarshallableEntry<K, V> loadEntry(final Object key) {
// Outdated?
// Fetches an entry from the storage using the specified key. The CacheLoader should retrieve from the external storage all
// of the data that is needed to reconstruct the entry in memory, i.e. the value and optionally the metadata. This method
// needs to return a MarshalledEntry which can be constructed as follows:
//
// ctx.getMarshalledEntryFactory().new MarshalledEntry(key, value, metadata);
//
// If the entry does not exist or has expired, this method should return null.
// If an error occurs while retrieving data from the external storage, this method should throw a PersistenceException
//
// Note that keys and values will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
// If the loader needs to have knowledge of the key/value data beyond their binary representation, then it needs access to the key's and value's
// classes and the marshaller used to encode them.
Loggers.SQL_LOG.trace("[IS0108] SQL store: Loading {} cache entry with key {}", getCacheName(), key);
final Record record;
Timer.Context timerCtx = timers.loadTimer.time();
try {
record = sql.selectFrom(table(sqlRecordTransformer.getTableName()))
.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
.fetchOne();
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0109] {}, {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
} finally {
timerCtx.stop();
}
if (record == null) {
// Not found
Loggers.SQL_LOG.trace("[IS0110] SQL store: Record with key {} not found", key);
return null;
}
if (Loggers.SQL_LOG.isTraceEnabled()) {
Loggers.SQL_LOG.trace("[IS0111] SQL store: Retrieved record: {}", record);
}
// Transform SQL record to Infinispan entry
InfinispanEntry<K, V> infinispanEntry;
try {
infinispanEntry = sqlRecordTransformer.toInfinispanEntry(wrap(record));
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0137] SQL store: Error transforming SQL record for key " + key + ": " + e.getMessage());
throw e;
}
if (infinispanEntry.isExpired()) {
Loggers.SQL_LOG.trace("[IS0135] SQL store: Record with key {} expired", key);
return null;
}
return marshallableEntryFactory.create(
infinispanEntry.getKey(),
infinispanEntry.getValue(),
infinispanEntry.getMetadata(),
PrivateMetadata.empty(),
infinispanEntry.created(),
infinispanEntry.lastUsed()
);
}
@Override
public boolean delete(final Object key) {
// The CacheWriter should remove from the external storage the entry identified by the specified key.
// Note that keys will be in the cache's native format, which means that if the cache is being used by a remoting protocol
// such as HotRod or REST and compatibility mode has not been enabled, then they will be encoded in a byte[].
Loggers.SQL_LOG.trace("[IS0112] SQL store: Deleting {} cache entry with key {}", getCacheName(), key);
int deletedRows;
Timer.Context timerCtx = timers.deleteTimer.time();
try {
deletedRows = sql.deleteFrom(table(sqlRecordTransformer.getTableName()))
.where(sqlRecordTransformer.resolveSelectionConditions(resolveKey(key)))
.execute();
} catch (Exception e) {
Loggers.SQL_LOG.error("[IS0113] {}, {}", e.getMessage(), e);
throw new PersistenceException(e.getMessage(), e);
} finally {
timerCtx.stop();
}
Loggers.SQL_LOG.trace("[IS0113] SQL store: Deleted {} record with key {}", deletedRows, key);
if (deletedRows == 1) {
return true;
} else if (deletedRows == 0) {
return false;
} else {
Loggers.SQL_LOG.error("[IS0114] Too many deleted rows ({}) for key {}", deletedRows, key);
throw new PersistenceException("Too many deleted rows for key " + key);
}
}
@Override
public void write(final MarshallableEntry<? extends K, ? extends V> entry) {
Loggers.SQL_LOG.trace("[IS0115] SQL store: Writing {} cache entry {}", getCacheName(), entry);
Timer.Context timerCtx = timers.writeTimer.time();
try {
SQLRecord sqlRecord = sqlRecordTransformer.toSQLRecord(
new InfinispanEntry<>(
entry.getKey(),
entry.getValue(),
new InternalMetadataBuilder()
.created(entry.created())
.lastUsed(entry.lastUsed())
.lifespan(entry.getMetadata() != null ? entry.getMetadata().lifespan() : -1L)
.maxIdle(entry.getMetadata() != null ? entry.getMetadata().maxIdle() : -1L)
.build()));
// Oracle (N)CLOB chunking
// https://stackoverflow.com/a/63957679/429425
final AtomicReference