
org.rajivprab.sava.database.ConcurrentConnectionPool

package org.rajivprab.sava.database;

import org.rajivprab.cava.PreparedStatementc.SqlException;
import org.rajivprab.cava.ThreadUtilc;
import org.rajivprab.cava.Validatec;
import org.rajivprab.sava.logging.Dispatcher;
import org.rajivprab.sava.logging.LogDispatcher;
import org.rajivprab.sava.logging.Severity;

import java.sql.Connection;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.stream.IntStream;

/**
 * Similar to DBCP, except that getting a resource does not remove it from the pool.
 * It is the user's responsibility to ensure that all connections are used in a thread-safe manner.
 * I.e., calls like commit() and rollback() should be prohibited,
 * because they could commit or rollback some other thread's transaction.
 *
 * Note that concurrent use of a connection to run multiple auto-commit SQL queries should be fine.
 * Functionally, the connection spec is thread-safe: http://stackoverflow.com/questions/1531073/is-java-sql-connection-thread-safe
 *
 * For performance, my testing indicates that PostgreSQL connections, at least, can be used concurrently
 * by multiple threads to run independent queries without being serialized behind one another.
 *
 * Created by rprabhakar on 10/5/15.
 */
public class ConcurrentConnectionPool {
    private static final Duration LEECH_TIMEOUT = Duration.ofSeconds(30);

    private final LogDispatcher logDispatcher;
    private final int connectionsRequested;
    private final List<Connection> connections = new CopyOnWriteArrayList<>();

    // Used to decide which connection to use next. No problems even if this is not thread-safe.
    private int index = 0;

    public static ConcurrentConnectionPool build(ConnectionFactory factory, int numConnections) {
        return build(LogDispatcher.build(Dispatcher.getWarningDispatcher()), factory, numConnections);
    }

    public static ConcurrentConnectionPool build(LogDispatcher logDispatcher,
                                                 ConnectionFactory factory,
                                                 int numConnections) {
        return new ConcurrentConnectionPool(logDispatcher, factory, numConnections);
    }

    private ConcurrentConnectionPool(LogDispatcher logDispatcher, ConnectionFactory factory, int connectionsRequested) {
        this.connectionsRequested = connectionsRequested;
        this.logDispatcher = logDispatcher;
        new Thread(() -> seedAll(factory)).start();
    }

    public Connection leech() {
        int size = connections.size();
        Instant start = Instant.now();
        while (size == 0 && start.plus(LEECH_TIMEOUT).isAfter(Instant.now())) {
            logDispatcher.report(this, Severity.INFO, "Waiting for connections to fill up");
            ThreadUtilc.sleep(5);
            size = connections.size();
        }
        Validatec.notEmpty(connections, "No DB connections available to leech");
        return connections.get(index++ % size);
    }

    public void closeAllConnections() {
        closeAllConnections(Duration.ofSeconds(10));
    }

    public synchronized void closeAllConnections(Duration timeout) {
        Instant start = Instant.now();
        // Close as many as possible immediately, in case we never get to the first timeout below
        connections.parallelStream().forEach(this::closeConnection);
        logDispatcher.report(this, Severity.INFO, connections.size() + " DB connections closed successfully");
        while (!allConnectionsFilled(timeout.dividedBy(10)) && start.plus(timeout).isAfter(Instant.now())) {
            // Keep closing existing connections periodically, in case we never get to the final goal
            connections.parallelStream().forEach(this::closeConnection);
            logDispatcher.report(this, Severity.INFO, connections.size() + " DB connections closed successfully");
        }
        // Close all connections one last time once they are all available, or once we hit the timeout
        connections.parallelStream().forEach(this::closeConnection);
        Severity severity = connections.size() < connectionsRequested ? Severity.WARN : Severity.INFO;
        logDispatcher.report(this, severity, connections.size() + " DB connections closed successfully");
    }

    // ---------------

    private boolean allConnectionsFilled(Duration timeout) {
        Instant start = Instant.now();
        while ((connections.size() != connectionsRequested) && start.plus(timeout).isAfter(Instant.now())) {
            // This is needed for tests and short JVMs, where the shutdown hook is called before connections are seeded
            logDispatcher.report(this, Severity.INFO, "Waiting for connections to fill up before closing");
            ThreadUtilc.sleep(timeout.dividedBy(10));
        }
        return connections.size() == connectionsRequested;
    }

    private void seedAll(ConnectionFactory factory) {
        Validatec.size(connections, 0, "Connections should be empty at start");
        IntStream.range(0, connectionsRequested)
                 .parallel()
                 .mapToObj(i -> genConnection(factory))
                 .filter(Optional::isPresent)
                 .map(Optional::get)
                 .forEach(connections::add);
    }

    private Optional<Connection> genConnection(ConnectionFactory factory) {
        try {
            return Optional.of(factory.createNewConnection());
        } catch (SqlException e) {
            // Likely cause: too many connections opened
            Severity severity = connections.size() >= (connectionsRequested / 2) ? Severity.WARN : Severity.FATAL;
            logDispatcher.report(this, severity,
                    "Cannot create new connection. Present pool size: " + connections.size(), e);
            return Optional.empty();
        }
    }

    private void closeConnection(Connection connection) {
        try {
            if (connection.isClosed()) {
                logDispatcher.report(this, Severity.INFO, "Connection was already closed: " + connection);
            } else {
                connection.close();
                logDispatcher.report(this, Severity.INFO, "Connection closed: " + connection);
                // Do not remove the connection from the list, since that would break allConnectionsFilled
            }
        } catch (Exception e) {
            logDispatcher.report(this, Severity.ERROR, "Error closing: " + connection, e);
        }
    }
}
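
For illustration, below is a minimal usage sketch. It assumes that ConnectionFactory (referenced above but not shown) is an interface in the same package whose single method createNewConnection() returns a java.sql.Connection, and it uses a placeholder PostgreSQL JDBC URL; both of these are assumptions, not part of the listing above.

package org.rajivprab.sava.database;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ConcurrentConnectionPoolExample {
    public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL; replace with a real database and credentials.
        String url = "jdbc:postgresql://localhost:5432/example_db?user=example&password=example";

        // Assumed shape of ConnectionFactory: one createNewConnection() method returning Connection.
        ConnectionFactory factory = new ConnectionFactory() {
            @Override
            public Connection createNewConnection() {
                try {
                    return DriverManager.getConnection(url);
                } catch (SQLException e) {
                    throw new RuntimeException(e);  // assumption: wrap the checked JDBC exception
                }
            }
        };

        // Connections are seeded asynchronously on a background thread;
        // leech() waits up to 30 seconds for the first one to appear.
        ConcurrentConnectionPool pool = ConcurrentConnectionPool.build(factory, 4);

        // leech() hands out a shared connection without removing it from the pool,
        // so run only auto-commit statements on it and never call commit() or rollback().
        Connection connection = pool.leech();
        try (Statement statement = connection.createStatement();
             ResultSet results = statement.executeQuery("SELECT 1")) {
            results.next();
            System.out.println("Query returned: " + results.getInt(1));
        }

        // Close every pooled connection once the application is done with the database.
        pool.closeAllConnections();
    }
}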




