/*
Example of use:
RubyAdapter rh = new RubyAdapter(railsRoot, jrubyLib);
DynamicPool pool = new DynamicPool(rh, 1, true, true);
pool.start();
would, assuming a correctly-implemented RubyAdapter class, create a pool serving Ruby instances.
*/
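/*
A minimal sketch of what a PoolAdapter implementation might look like, inferred from the calls
this pool makes (initializeObject(), validate(T), and dispose(T)). The Widget type, its methods,
and the exact PoolAdapter signatures are assumptions for illustration only and should be checked
against the real interface:

public class WidgetAdapter implements PoolAdapter<Widget> {
    public Widget initializeObject() {
        return new Widget();      // expensive construction happens here, on a pool worker thread
    }
    public boolean validate(Widget w) {
        return !w.isBroken();     // returning false makes the pool dispose of and replace the object
    }
    public void dispose(Widget w) {
        w.close();                // release any external resources the object holds
    }
}
*/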
package com.sun.grizzly.pool;
import com.sun.grizzly.http.SelectorThread;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
/**
* A generic self-sizing pool of objects.
*
* TODO: Backwards compatibility? How?
* // Can RubyObjectPool just redirect to DynamicPool with a RubyAdapter?
* TODO: Check thread safety for a) actual correctness and b) efficiency.
* It is likely that a competent application of volatile and such would make this faster.
*
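* A typical lifecycle, sketched with a hypothetical adapter and pooled type (Widget and
* WidgetAdapter are placeholders, not part of this package):
* <pre>{@code
* DynamicPool<Widget> pool = new DynamicPool<Widget>(new WidgetAdapter(), 5, true, true);
* pool.start();                     // blocks until the initial objects have been created
* Widget w = pool.borrowObject();   // may return null if no object becomes available in time
* if (w != null) {
*     try {
*         // ... use w to serve a request ...
*     } finally {
*         pool.returnObject(w);
*     }
* }
* pool.stop();                      // disposes of idle objects and shuts the pool down
* }</pre>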
*
* @author TAKAI Naoto
* @author Pramod Gopinath
* @author Vivek Pandey
* @author Jacob Kessler
*/
public class DynamicPool<T> {
/**
* How long to wait (in seconds) before giving up.
*/
public static final long DEFAULT_TIMEOUT = 360L;
/**
* The default number of objects to create when no valid count is supplied.
*/
private final int defaultNumberOfObjects = 5;
/**
* Object queue
*/
private final BlockingQueue<T> queue = new LinkedBlockingQueue<T>();
/**
* Is Grizzly ARP enabled.
*/
private final boolean asyncEnabled;
private final int numberOfObjects;
// We use this so much, we might as well cache it
private final int procs = Runtime.getRuntime().availableProcessors();
// CachedThreadPool size limited by currentlyGeneratingObjects: We want to avoid overloading the system
// during times of high load by trying to generate lots of new objects. However, we may want to
// generate multiple objects at the same time on a large machine
private ExecutorService objectGenerator = Executors.newCachedThreadPool();
private final int maxGeneratingObjects;
private final int maxGeneratingObjects_def = 1;
/**
* The maximum number of objects that we want sitting in the queue (idle) at a time.
*/
private AtomicInteger maximumIdleObjects = new AtomicInteger(1);
/*
* currentlyActiveObjects keeps track of how many objects the pool "knows" about, either lent out
* or sitting in the queue. It uses that knowledge, along with the limits on the number of objects,
* to prevent itself from making excessive objects, even under heavy load, since at some point making
* a new object won't actually help the load conditions.
*
* currentlyGeneratingObjects keeps track of how many objects are scheduled for creation but have not
* yet been added to the queue. In case object creation takes a long time relative to the request
* service time, requests on a pool at capacity have a tendency to impatiently request 3 or 4 objects
* while waiting for a new object to initialize, which would otherwise cause 3 or 4 too many objects
* to be created, costing both memory while the object lives (bad) and CPU time to create and remove
* the object (really bad, since that creation work happens while the system is already under load).
* currentlyGeneratingObjects allows the object creator to refuse both object creation requests and
* object creation ticks while it already has objects queued to be created.
*
*/
private volatile int currentlyActiveObjects = 0;
private volatile int currentlyGeneratingObjects = 0;
// Soft limit on the number of objects we will create
// Will adjust based on load
private AtomicInteger maximumActiveObjects = new AtomicInteger (procs);
// Hard minimum and Maximum values for number of objects. The dynamic resizer will not exceed these limits
private int hardMinActiveObjects_def = procs;
private final int hardMaxActiveObjects;
private final int hardMinActiveObjects;
/*
* The "Tick" variables are the variables that keep track of how close to the various thresholds
* the pool is. Despite being read and modified all over the place by different threads, they have been
* maintained as normal variables, since they have wide margins of error and tend to be self-correcting.
* Furthermore, the consequences of them being off by a read or two aren't very severe: to trigger the
* threshold, they would need to have been reasonably close anyway. As such, it seems like labeling them
* as volatile would be an unneeded performance hit. I'm happy to accept criticism from someone more experienced
* than me, though.
*/
private AtomicInteger downTicks = new AtomicInteger(0); // Manages decreasing the number of runtimes
private AtomicInteger upTicks = new AtomicInteger(0); // Manages increasing the number of runtimes
private AtomicInteger queueTicks = new AtomicInteger(0); // Manages the queue length
private AtomicInteger newTicks = new AtomicInteger(0); // Manages new Runtime creation
/*
* Thresholds control how many ticks are required to change their associated property. Decreasing
* these values will make the pool change size more rapidly in response to changing traffic, but
* also makes it more likely to create or delete objects in response to brief spikes or dips in
* incoming requests. A constructor has been provided to allow users to set their own values.
*/
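// For example, with the default responsiveness of 10 the constructors below set every threshold
// to 20 - 10 = 10 ticks, while a responsiveness of 15 lowers them to 5 ticks, so roughly half as
// many accumulated ticks are needed before the pool adjusts its bounds.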
private final int upThreashold;
private final int downThreashold;
private final int queueThreashold;
private final int newThreashold;
private final long baseTime;
private final boolean dynamic;
private boolean validate;
private PoolAdapter<T> objectLib;
/**
* Create a pool of objects. This can be created for the whole container, and each object from
* the pool can be used to set up an instance that serves requests.
*
* @param type An object that knows how to create, validate, and dispose of the objects this pool is responsible for
* @param numObjects number of objects to be created initially.
* @param asyncEnabled whether grizzly ARP needs to be enabled
* @param valid Should objects returning to the pool be checked for validity?
*/
public DynamicPool(PoolAdapter<T> type, int numObjects, boolean asyncEnabled, boolean valid) {
this(type, numObjects, asyncEnabled, valid, 10, -1, -1, -1); // -1 provides default values
}
/**
* Create a pool of objects. This can be created for the whole container and each of the objects can be used
* from the pool to serve requests. This constructor allows finer control over some of the
* dynamic bounds and the object pool resizing logic.
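*
* For example, with a hypothetical adapter (the adapter, pooled type, and chosen values are
* illustrative, not recommendations):
* <pre>{@code
* // initial size 5, faster resizing, between 2 and 32 objects, up to 2 creations in flight
* DynamicPool<Widget> pool =
*     new DynamicPool<Widget>(new WidgetAdapter(), 5, true, true, 15, 2, 32, 2);
* }</pre>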
*
* @param type An object that knows how to create, validate, and dispose of the objects this pool is responsible for
* @param numObjects number of pool objects to be created initially.
* @param asyncEnabled whether grizzly ARP needs to be enabled
* @param valid Should objects returning to the pool be checked for validity?
* @param responsiveness How quickly the pool adjusts its bounds. Higher values will react more quickly to changes in load. Suggested values are between 5 and 15; 10 is the default.
* @param minObjects Minimum number of objects to keep active at all times, regardless of load. If minObjects is larger than numObjects, the object count will grow as needed until minObjects is reached.
* @param maxObjects Maximum number of objects to allow at any time, regardless of load.
* @param maxInProg Maximum number of objects to schedule for creation at once. Defaults to one, but performance may be increased in situations where there are lots of processors and highly variable load by increasing it.
*/
public DynamicPool(PoolAdapter<T> type, int numObjects, boolean asyncEnabled, boolean valid,
int responsiveness, int minObjects, int maxObjects, int maxInProg) {
objectLib = type;
this.asyncEnabled = asyncEnabled;
if (numObjects > 0) {
this.numberOfObjects = numObjects;
} else {
this.numberOfObjects = defaultNumberOfObjects;
}
maximumIdleObjects.set(numberOfObjects);
baseTime = System.currentTimeMillis();
if (responsiveness > 19) {
responsiveness = 19;
}
upThreashold = 20-responsiveness;
downThreashold = 20-responsiveness;
queueThreashold = 20-responsiveness;
newThreashold = 20-responsiveness;
if (maxObjects > 0) {
hardMaxActiveObjects = maxObjects;
} else {
hardMaxActiveObjects = maximumActiveObjects.intValue() * 2;
}
if (minObjects > 0 && minObjects <= hardMaxActiveObjects) {
hardMinActiveObjects = minObjects;
} else {
hardMinActiveObjects = Math.min(hardMinActiveObjects_def, hardMaxActiveObjects);
}
String dynamicEnable = System.getProperty("jruby.runtime.dynamic");
dynamic = dynamicEnable == null || Boolean.valueOf(dynamicEnable);
validate = valid;
if (maxInProg > 0) {
maxGeneratingObjects = maxInProg;
} else {
maxGeneratingObjects = maxGeneratingObjects_def;
}
logDynamicStatus();
}
/**
* Build a dynamic pool of objects based on a provided PoolAdapter and DynamicPoolConfig.
* DynamicPoolConfig allows all internal options to be set.
* Values from DynamicPoolConfig are copied out, and a reference to the PoolConfig is not stored, so it is
* safe to change values in a DynamicPoolConfig after the pool has been created.
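*
* A sketch of config-based construction; the setter names below are assumptions that mirror the
* getters this constructor reads, and are not confirmed by this class:
* <pre>{@code
* DynamicPoolConfig config = new DynamicPoolConfig(); // assumed no-arg constructor
* config.setNumberOfObjects(5);        // assumed setter mirroring getNumberOfObjects()
* config.setHardMaxActiveObjects(32);  // assumed setter mirroring getHardMaxActiveObjects()
* DynamicPool<Widget> pool = new DynamicPool<Widget>(new WidgetAdapter(), config);
* }</pre>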
* @param type An object that knows how to create, validate, and dispose of the objects this pool is responsible for
* @param config The config object that contains the internal variables for this pool
*/
public DynamicPool(PoolAdapter<T> type, DynamicPoolConfig config) {
objectLib = type;
numberOfObjects = config.getNumberOfObjects();
maxGeneratingObjects = config.getMaxGeneratingObjects();
hardMaxActiveObjects = config.getHardMaxActiveObjects();
hardMinActiveObjects = config.getHardMinActiveObjects();
upThreashold = config.getUpThreashold();
downThreashold = config.getDownThreashold();
queueThreashold = config.getQueueThreashold();
newThreashold = config.getNewThreashold();
asyncEnabled = config.isAsyncEnabled();
validate = config.shouldValidate();
baseTime = System.currentTimeMillis();
String dynamicEnable = System.getProperty("jruby.runtime.dynamic");
dynamic = dynamicEnable == null || Boolean.valueOf(dynamicEnable);
logDynamicStatus();
}
public long getBaseTime() {
return baseTime;
}
public boolean getValidation() {
return validate;
}
public void setValidation(boolean val) {
validate = val;
}
/**
* Retrieves an object from the object pool.
*
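* With ARP enabled this call may block for up to {@link #DEFAULT_TIMEOUT} seconds before giving
* up; without ARP it returns immediately. In either case the result can be {@code null}, so
* callers should check it before use, for example:
* <pre>{@code
* T obj = pool.borrowObject();
* if (obj == null) {
*     // no object became available; fail or retry rather than dereferencing null
* }
* }</pre>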
* @return an object from the pool, or {@code null} if none became available in time.
*/
public T borrowObject() {
//long time = System.currentTimeMillis();
if (isAsyncEnabled()) {
// check to see if we can get one right now
try {
T gotten = queue.poll(50, TimeUnit.MILLISECONDS); // Wait, but only briefly
if (gotten != null) {
if (dynamic) { // Only keep track of statistics if dynamic is enabled
if (queue.size() == 0) { // If we took the last runtime
int localQueue = queueTicks.incrementAndGet();
if (localQueue > queueThreashold) { // Deal with increasing the maximum queue size.
queueTicks.set(0);
int localIdle = maximumIdleObjects.incrementAndGet();
if (localIdle > currentlyActiveObjects) {
maximumIdleObjects.set(currentlyActiveObjects); // Volatile is enough for non-assignment operations (right?)
}
}
} else { // Otherwise, there were at least two idle runtimes, so we have plenty lying around
int localNew = newTicks.decrementAndGet();
if (localNew < 0) {
newTicks.set(0); // may drop an update, oh well
}
int localQueue = queueTicks.decrementAndGet(); // start thinking about reducing the queue size
if (localQueue < -queueThreashold) {
queueTicks.set(0);
// Reduce size of queue;
int localIdle = maximumIdleObjects.decrementAndGet();
if (localIdle < procs) { // Keep a minimum queue size of # of processors
maximumIdleObjects.set(procs);
}
}
}
}
SelectorThread.logger().log(Level.FINE,
"Received new runtime from the queue. " + currentlyActiveObjects + "/" + maximumActiveObjects +
" active objects (" + queue.size() + " idle, " + (currentlyActiveObjects - queue.size()) + " active)");
return gotten;
} else { // This is the branch for "We waited 50ms for a runtime, but did not receive one."
// request that the number of runtimes be increased
if (dynamic) { // think about increasing the maximum number of active runtimes
int localUp = upTicks.incrementAndGet();
int localQueue = queueTicks.incrementAndGet();
if (localUp > upThreashold) {
upTicks.set(0);
queueTicks.set(0);
maximumIdleObjects.incrementAndGet();
int localMax = maximumActiveObjects.incrementAndGet();
if (localMax > hardMaxActiveObjects) {
maximumActiveObjects.set(hardMaxActiveObjects);
}
}
if (localQueue > queueThreashold) {
queueTicks.set(0);
int localIdle = maximumIdleObjects.incrementAndGet();
if (localIdle > currentlyActiveObjects) {
maximumIdleObjects.set(currentlyActiveObjects);
}
}
}
makeNewObject(); // Vote for more Objects to be created
// Block until an object becomes available
gotten = queue.poll(DEFAULT_TIMEOUT, TimeUnit.SECONDS);
//System.out.println("Wait time (pool miss): " + (System.currentTimeMillis() - time));
SelectorThread.logger().log(Level.FINE,
"Recieved new runtime from the queue. " + currentlyActiveObjects + "/" + maximumActiveObjects +
" active objects (" + queue.size() + " idle, " + (currentlyActiveObjects - queue.size()) + " active");
return gotten;
}
} catch (InterruptedException e) {
// Following old InterruptedException behavior
throw new RuntimeException(e);
}
} else {
// old non-async behavior
return queue.poll();
}
}
/**
* Returns object to the object pool.
*
* @param object - object to be returned after use
*/
public void returnObject(T object) {
// check to see if we should bother returning this object to the queue
if (queue.size() < maximumIdleObjects.intValue()) {
if (validate) {
if (objectLib.validate(object)) {
queue.offer(object);
} else {
// invalid object returned!
currentlyActiveObjects--; // CAO is a volatile, so we don't need to sync around it here, which is good since dispose() may take a while
objectLib.dispose(object);
// need to replace it
newTicks.addAndGet(newThreashold); // ensure creation of a new object
makeNewObject();
}
} else {
queue.offer(object);
}
if (dynamic) {
int localDown = downTicks.decrementAndGet();
if (localDown < 0) {
downTicks.set(0); // may drop an update, oh well.
}
SelectorThread.logger().log(Level.FINE,
"Returned object to the queue. " + currentlyActiveObjects + "/" + maximumActiveObjects +
" active objects (" + queue.size() + " idle, " + (currentlyActiveObjects - queue.size()) + " active");
}
} else {
SelectorThread.logger().log(Level.FINE,
"Excessive idle objects: returned object not re-added to the queue. " + currentlyActiveObjects + "/" + maximumActiveObjects +
" active objects (" + queue.size() + " idle, " + (currentlyActiveObjects - queue.size()) + " active");
if (dynamic) {
int localDown = downTicks.incrementAndGet();
int localUp = upTicks.decrementAndGet();
if (localDown > downThreashold) {
downTicks.set(0);
if (currentlyActiveObjects > hardMinActiveObjects) {
currentlyActiveObjects--;
// Otherwise, we just allow it to fall on the floor and be cleaned up by the GC
objectLib.dispose(object);
//System.out.println("Dropped an object\t" + (System.currentTimeMillis() - baseTime) + " " + queue.size());
} else {
queue.offer(object);
}
} else {
//System.out.println("Downticks at " + (System.currentTimeMillis() - baseTime) + " " + downTicks);
queue.offer(object);
}
if (localUp < -upThreashold && maximumIdleObjects.intValue() > numberOfObjects) {
// Reduce the number of runtimes we are willing to hold around
upTicks.set(0);
int localIdle = maximumIdleObjects.decrementAndGet();
int localActive = maximumActiveObjects.decrementAndGet();
if (localIdle < numberOfObjects) {
maximumIdleObjects.set(numberOfObjects);
}
if (localActive < hardMinActiveObjects) {
maximumActiveObjects.set(hardMinActiveObjects);
}
}
} else {
currentlyActiveObjects--;
objectLib.dispose(object);
}
}
}
/**
* Starts the object pool. Calling this multiple times on the same object pool will cause undefined, and probably bad, behavior
*/
public void start() {
try {
int pnum = procs;
ExecutorService exec = Executors.newFixedThreadPool(pnum + 1);
for (int i = 0; i < numberOfObjects; i++) {
currentlyActiveObjects++;
exec.execute(new Runnable() {
public void run() {
long startTime = System.currentTimeMillis();
T newObject = objectLib.initializeObject();
SelectorThread.logger().log(Level.INFO,
" " + (System.currentTimeMillis() - baseTime) + " New instance created " + (System.currentTimeMillis() - startTime)/1000);
queue.offer(newObject);
}
});
}
exec.shutdown();
exec.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
/**
* Votes to create a new object; called when we run out of objects in the queue. A new object is only scheduled once enough creation ticks have accumulated.
*/
private void makeNewObject() {
//System.out.println("Currently Active: " + currentlyActiveObjects + " of " + maximumActiveObjects + ", Making " + currentlyGeneratingObjects + " of " + maxGeneratingObjects);
if ((currentlyActiveObjects < maximumActiveObjects.intValue()) && (currentlyGeneratingObjects < maxGeneratingObjects)) {
int localNew = newTicks.addAndGet(2);
//System.out.println("New Ticks: " + newTicks);
if (localNew > newThreashold) {
currentlyActiveObjects++;
currentlyGeneratingObjects++;
newTicks.set(0);
objectGenerator.submit(new Runnable() {
public void run() {
long startTime = System.currentTimeMillis();
T newObject = objectLib.initializeObject();
SelectorThread.logger().log(Level.INFO,
" " + (System.currentTimeMillis() - baseTime) + " New instance created " + (System.currentTimeMillis() - startTime)/1000);
queue.offer(newObject);
currentlyGeneratingObjects--;
}
});
}
}
}
/**
* Shuts down the object pool, disposing of all idle objects.
*/
public void stop() {
for (T thing : queue) {
objectLib.dispose(thing);
}
queue.clear();
// Stop our object-creation thread
try {
objectGenerator.shutdown();
objectGenerator.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (InterruptedException e) {
SelectorThread.logger().log(Level.WARNING,
"Object Pool interrupted on shutdown!");
}
}
public int getNumberOfObjects() {
return numberOfObjects;
}
protected BlockingQueue<T> getObjectQueue() {
return queue;
}
public boolean isAsyncEnabled() {
return asyncEnabled;
}
private void logDynamicStatus() {
// logs the min, max, etc. values of the dynamic pool on startup
if (dynamic) {
SelectorThread.logger().log(Level.INFO,
"Dynamic pool created. Initial runtimes will be " + numberOfObjects + ", hard minimum is " + hardMinActiveObjects +
", hard maximum is " + hardMaxActiveObjects +
". If you experiance out of memory errors, consider increasing the heap size or " +
"manually adjusting the hard maximum using the provided constructors.");
} else {
SelectorThread.logger().log(Level.INFO,
"Pool started without dynamic resizing enabled. Pool will not attempt to determine the upper and lower bounds that it should be using");
}
}
}