All Downloads are FREE. Search and download functionalities are using the official Maven repository.

org.rajivprab.sava.database.ConcurrentConnectionPool Maven / Gradle / Ivy

package org.rajivprab.sava.database;

import org.rajivprab.cava.ThreadUtilc;
import org.rajivprab.cava.Validatec;
import org.rajivprab.cava.exception.SQLExceptionc;
import org.rajivprab.sava.logging.Dispatcher;
import org.rajivprab.sava.logging.LogDispatcher;
import org.rajivprab.sava.logging.Severity;

import java.sql.Connection;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.IntStream;

/**
 * Similar to DBCP, except that getting a resource does not remove it from the pool.
 * It is the user's responsibility to ensure that all connections are used in a thread-safe manner.
 * I.e., calls like commit() and rollback() should be prohibited,
 * because they could commit or rollback some other thread's query.
 *
 * Note that concurrent use of a connection to run multiple auto-commit SQL queries should be fine.
 * Functionally, the connection spec is thread-safe: http://stackoverflow.com/questions/1531073/is-java-sql-connection-thread-safe
 * However, the performance is not good. All queries on the same connection are serialized behind one another,
 * with the 2nd query only getting executed after the 1st query has completely finished.
 * Parallelism is only attained when the queries are sent via different connections.
 * 

* Created by rprabhakar on 10/5/15. */ public class ConcurrentConnectionPool { private static final Duration LEECH_TIMEOUT = Duration.ofSeconds(30); private final LogDispatcher logDispatcher; private final int connectionsRequested; private final List connections = new CopyOnWriteArrayList<>(); public static ConcurrentConnectionPool build(ConnectionFactory factory, int numConnections) { return build(LogDispatcher.build(Dispatcher.getWarningDispatcher()), factory, numConnections); } public static ConcurrentConnectionPool build(LogDispatcher logDispatcher, ConnectionFactory factory, int numConnections) { return new ConcurrentConnectionPool(logDispatcher, factory, numConnections); } private ConcurrentConnectionPool(LogDispatcher logDispatcher, ConnectionFactory factory, int connectionsRequested) { this.connectionsRequested = connectionsRequested; this.logDispatcher = logDispatcher; new Thread(() -> seedAll(factory)).start(); } public Connection leech() { int size = connections.size(); Instant start = Instant.now(); while (size == 0 && start.plus(LEECH_TIMEOUT).isAfter(Instant.now())) { logDispatcher.report(this, Severity.INFO, "Waiting for connections to fill up"); ThreadUtilc.sleep(50); size = connections.size(); } Validatec.notEmpty(connections, "No DB connections available to leech"); return connections.get(ThreadLocalRandom.current().nextInt(size)); } public void closeAllConnections() { closeAllConnections(Duration.ofSeconds(10)); } public synchronized void closeAllConnections(Duration timeout) { Instant end = Instant.now().plus(timeout); while (!allConnectionsFilled() && Instant.now().isBefore(end)) { // Close as many as possible immediately, in case JVM gets shutdown any second connections.parallelStream().forEach(this::closeConnection); logDispatcher.report(this, Severity.INFO, connections.size() + " DB connections closed. 
Waiting for more"); ThreadUtilc.sleep(timeout.dividedBy(10)); } // Close all connections one last time once they are all available, or once we hit timeout Severity severity = allConnectionsFilled() ? Severity.INFO : Severity.WARN; connections.parallelStream().forEach(this::closeConnection); logDispatcher.report(this, severity, connections.size() + " DB connections closed"); } // --------------- private boolean allConnectionsFilled() { Validatec.greaterOrEqual(connectionsRequested, connections.size()); return connections.size() == connectionsRequested; } private void seedAll(ConnectionFactory factory) { Validatec.size(connections, 0, "Connections should be empty at start"); IntStream.range(0, connectionsRequested) .parallel() .mapToObj(i -> genConnection(factory)) .filter(Optional::isPresent) .map(Optional::get) .forEach(connections::add); logDispatcher.report(this, Severity.INFO, "Finished seeding " + connections.size() + " connections"); } private Optional genConnection(ConnectionFactory factory) { try { return Optional.of(factory.createNewConnection()); } catch (SQLExceptionc e) { // Likely cause: Too many connections opened int numConnectionsCreated = connections.size(); Severity severity = numConnectionsCreated >= (connectionsRequested / 2) ? Severity.WARN : Severity.FATAL; logDispatcher.report(this, severity, "Cannot create new connection. Present pool size: " + numConnectionsCreated, e); return Optional.empty(); } } private void closeConnection(Connection connection) { try { if (connection.isClosed()) { logDispatcher.report(this, Severity.WARN, "Connection was already closed: " + connection); } else { connection.close(); logDispatcher.report(this, Severity.INFO, "Connection closed: " + connection); // Do not remove connection from list since it messes up the allConnectionsFilled method } } catch (Exception e) { logDispatcher.report(this, Severity.ERROR, "Error closing: " + connection, e); } } }





© 2015 - 2025 Weber Informatics LLC | Privacy Policy