
com.sun.grizzly.pool.DynamicPool

/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 1997-2007 Sun Microsystems, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of either the GNU
 * General Public License Version 2 only ("GPL") or the Common Development
 * and Distribution License("CDDL") (collectively, the "License").  You
 * may not use this file except in compliance with the License. You can obtain
 * a copy of the License at https://glassfish.dev.java.net/public/CDDL+GPL.html
 * or glassfish/bootstrap/legal/LICENSE.txt.  See the License for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing the software, include this License Header Notice in each
 * file and include the License file at glassfish/bootstrap/legal/LICENSE.txt.
 * Sun designates this particular file as subject to the "Classpath" exception
 * as provided by Sun in the GPL Version 2 section of the License file that
 * accompanied this code.  If applicable, add the following below the License
 * Header, with the fields enclosed by brackets [] replaced by your own
 * identifying information: "Portions Copyrighted [year]
 * [name of copyright owner]"
 *
 * Contributor(s):
 *
 * If you wish your version of this file to be governed by only the CDDL or
 * only the GPL Version 2, indicate your decision by adding "[Contributor]
 * elects to include this software in this distribution under the [CDDL or GPL
 * Version 2] license."  If you don't indicate a single choice of license, a
 * recipient has the option to distribute your version of this file under
 * either the CDDL, the GPL Version 2 or to extend the choice of license to
 * its licensees as provided above.  However, if you add GPL Version 2 code
 * and therefore, elected the GPL Version 2 license, then the option applies
 * only if the new code is made subject to such option by the copyright
 * holder.
 */
package com.sun.grizzly.pool;

import com.sun.grizzly.http.SelectorThread;

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.*;
import java.util.logging.Level;

/**
 * A generic self-sizing pool of objects.
 * Example of use:
 * <pre>
 * RubyAdapter rh = new RubyAdapter(railsRoot, jrubyLib);
 * DynamicPool pool = new DynamicPool(rh, 1, true);
 * pool.start();
 * </pre>
 * would, assuming a correctly-implemented RubyAdapter class, create a pool
 * serving Ruby instances.
 *
 * @author Vivek Pandey
 * @author Jacob Kessler
 */
public class DynamicPool<T> {

    /**
     * How long to wait before giving up.
     */
    public static final long DEFAULT_TIMEOUT = 360L;

    /**
     * Object queue
     */
    private final BlockingQueue<T> queue = new LinkedBlockingQueue<T>();

    /**
     * Is Grizzly ARP enabled.
     */
    private final boolean asyncEnabled;

    /**
     * Initial number of objects to create. Used at startup and to set some defaults, then ignored.
     */
    private final int numberOfObjects;

    // CachedThreadPool size limited by currentlyGeneratingObjects: We want to avoid overloading the system
    // during times of high load by trying to generate lots of new objects. However, we may want to
    // generate multiple objects at the same time on a large machine.
    private ExecutorService objectGenerator = Executors.newCachedThreadPool();

    /**
     * Maximum objects to generate at a time. Runtimes take long enough to generate that there will often be many pool timeouts
     * while waiting for the object to generate, if the pool is actually running over capacity. Once the pool is generating maxGeneratingObjects,
     * it will completely ignore requests for new runtimes, and the tick variable will not be incremented, until it is finished generating. Default one.
     */
    private final int maxGeneratingObjects;

    /**
     * The maximum number of objects that we want sitting in the queue (idle) at a time. This serves as a soft limit for minimum runtimes,
     * as well as buffering against brief drops in traffic.
     */
    private AtomicInteger maximumIdleObjects = new AtomicInteger(1);

    /*
     * currentlyActiveObjects keeps track of how many objects the pool "knows" about, either lent out
     * or sitting in the queue. It uses that knowledge, along with the limits on the number of objects,
     * to prevent itself from making excessive objects, even under heavy load, since at some point making
     * a new object won't actually help the load conditions. It should always be between hardMinActiveObjects and hardMaxActiveObjects.
     *
     * currentlyGeneratingObjects keeps track of how many objects are scheduled for creation but have not
     * yet been added to the queue. In case object creation takes a long time relative to the request
     * service time, requests on an at-capacity pool have a tendency to impatiently request 3 or 4 objects
     * while waiting for a new object to initialize, which would otherwise cause 3 or 4 too many objects
     * to be created, costing both memory while the object lives (bad) and CPU time to create and remove
     * the object (really bad, since object creation would happen while under load). currentlyGeneratingObjects, combined with maxGeneratingObjects,
     * allows the object creator to refuse both object creation requests and object creation ticks while
     * it already has object(s) queued to be created.
     */
    private volatile int currentlyActiveObjects = 0;
    private volatile int currentlyGeneratingObjects = 0;

    // Hard minimum and maximum values for the number of objects. The dynamic resizer will not exceed these limits.
    private final int hardMaxActiveObjects;
    private final int hardMinActiveObjects;

    /*
     * The "tick" variables keep track of how close to the various thresholds
     * the pool is.
     */
    private AtomicInteger downTicks = new AtomicInteger(0);  // Manages decreasing the number of runtimes
    private AtomicInteger queueTicks = new AtomicInteger(0); // Manages the queue length
    private AtomicInteger newTicks = new AtomicInteger(0);   // Manages new Runtime creation

    /*
     * Thresholds control how many ticks are required to change their associated property. Decreasing
     * these values will make the pool change size more rapidly in response to changing traffic, but
     * also makes it more likely to create or delete objects in response to brief spikes or dips in
     * incoming requests. A constructor has been provided to allow users to set their own values.
     */
    // private final static int threashold_def = 10;
    private AtomicInteger downThreashold = new AtomicInteger(10);
    //private AtomicInteger queueThreashold = new AtomicInteger(10);
    private final int queueThreashold;
    private AtomicInteger newThreashold = new AtomicInteger(10);

    // It's int flags!
    private volatile boolean createdLast = true; // We start off creating things
    private AtomicInteger successfulRequests = new AtomicInteger(0);
    private AtomicInteger successThreashold = new AtomicInteger(3000); // this is an exponential function, so it should damp itself
    private final double successGrowthPercentage = 1.25; // what percentage should the threshold grow by?
    private final double failureGrowthPercentage = .8;   // How much should we lower thresholds if we fail twice in a row?

    // Basetime is the time that the server was started, and is used for logging
    private final long baseTime;

    /**
     * LastRequest was for implementing a time-based sweeper that would clean out runtimes under 0 load.
     * It has been left in in case that feature is desired in the future.
     */
    //private AtomicLong lastRequest = new AtomicLong();

    /**
     * Dynamic controls whether the pool should keep track of all of the variables to dynamically resize itself.
     * Currently, it turns itself off if hardMin == initial == hardMax, and is on in all other cases.
     */
    private final boolean dynamic;

    /**
     * Validate controls whether or not objects need to be checked for correctness before they are returned to the pool.
     */
    private final boolean validate;

    // Tuning variables
    private final int requestTimeout = 50; // Number of ms to initially wait for a runtime
    private final boolean fineLog = SelectorThread.logger().isLoggable(Level.FINE); // Should we bother to log things at the "fine" level?

    // The PoolAdapter provides methods (create, destroy, and validate) that are object-specific
    private PoolAdapter<T> objectLib;

    /**
     * Build a dynamic pool of objects based on a provided PoolAdapter and DynamicPoolConfig.
     * DynamicPoolConfig allows all internal options to be set.
     * Values from DynamicPoolConfig are copied out, and a reference to the PoolConfig is not stored, so it is
     * safe to change values in a DynamicPoolConfig after the pool has been created.
     * @param type An object that knows how to create, validate, and dispose of the objects this pool is responsible for
     * @param config The config object that contains the internal variables for this pool
     */
    public DynamicPool(PoolAdapter<T> type, DynamicPoolConfig config) {
        objectLib = type;
        numberOfObjects = config.getNumberOfObjects();
        maxGeneratingObjects = config.getMaxGeneratingObjects();
        hardMaxActiveObjects = config.getHardMaxActiveObjects();
        hardMinActiveObjects = config.getHardMinActiveObjects();
        // Set max idle to starting runtimes
        maximumIdleObjects.set(numberOfObjects);
        downThreashold.set(config.getDownThreashold());
        queueThreashold = config.getQueueThreashold();
        newThreashold.set(config.getNewThreashold());
        dynamic = config.isDynamic();
        asyncEnabled = config.isAsyncEnabled();
        validate = config.shouldValidate();
        baseTime = System.currentTimeMillis();
        // Writes the dynamic pool's status to the log file
        logDynamicStatus();
    }

    public long getBaseTime() {
        return baseTime;
    }

    public boolean getValidation() {
        return validate;
    }

    /**
     * Retrieves an object from the object pool.
     *
     * @return an object.
     */
    public T borrowObject() {
        // As above, lastRequest is for the time-based sweeper
        //lastRequest.set(System.currentTimeMillis());
        long time = System.currentTimeMillis();
        if (isAsyncEnabled()) {
            // check to see if we can get one right now
            try {
                T gotten = queue.poll(requestTimeout, TimeUnit.MILLISECONDS); // Wait, but only briefly
                if (gotten != null) {
                    if (dynamic) { // Only keep track of statistics if dynamic is enabled
                        if (queue.size() == 0) {
                            // If we took the last runtime, we should think about increasing the maximum idlers
                            int localQueue = queueTicks.incrementAndGet();
                            if (localQueue > queueThreashold) {
                                // Deal with increasing the maximum queue size.
                                queueTicks.set(0);
                                int localIdle = maximumIdleObjects.incrementAndGet();
                                // Make sure that we are never allowing more idle runtimes than exist
                                if (localIdle > currentlyActiveObjects) {
                                    maximumIdleObjects.set(currentlyActiveObjects); // Volatile is enough for non-assignment operations
                                }
                            }
                        } else {
                            // Otherwise, there were at least two idle runtimes, so we have plenty lying around
                            int localNew = newTicks.decrementAndGet(); // reduce newTicks, since we don't actually need a new runtime
                            if (localNew < 0) {
                                // make sure that it doesn't go below 0, or new runtimes will take a very long time to start creation
                                newTicks.set(0);
                            }
                            int localQueue = queueTicks.decrementAndGet(); // start thinking about reducing the queue size
                            if (localQueue < -queueThreashold) {
                                // Reduce the size of the queue if we are over the threshold
                                queueTicks.set(0);
                                int localIdle = maximumIdleObjects.decrementAndGet();
                                if (localIdle < 1) {
                                    // Make sure we allow at least one idle runtime
                                    maximumIdleObjects.set(1);
                                }
                            }
                        }
                        // We successfully handed out a runtime, which is what we really care about
                        int localSuccess = successfulRequests.incrementAndGet();
                        if (localSuccess >= successThreashold.get()) {
                            // These pool values seem to be working rather well, then, so we'll try to keep them
                            successfulRequests.set(0);
                            successThreashold.set((int) (successThreashold.get() * successGrowthPercentage));
                            downThreashold.incrementAndGet();
                            newThreashold.incrementAndGet();
                            // Log about it
                            if (fineLog) {
                                // Since we seem to be at a pool size that works, talk about our thresholds
                                SelectorThread.logger().log(Level.FINE,
                                        "Pool size seems to be working with " + currentlyActiveObjects + " total."
                                        + "Creation threashold is at " + newThreashold.get()
                                        + " Drop threashold is at " + downThreashold.get());
                            }
                        }
                    }
                    if (fineLog) {
                        // Log successful runtime borrowing if we are at FINE log or lower
                        long waitTime = System.currentTimeMillis() - time;
                        SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_RECEIVED_NEW_OBJECT,
                                waitTime, currentlyActiveObjects, hardMaxActiveObjects, queue.size(),
                                (currentlyActiveObjects - queue.size())));
                    }
                    return gotten;
                } else {
                    // This is the branch for "We waited 50ms for a runtime, but did not receive one."
                    if (dynamic) {
                        // increase the queue ticks: it's probably pretty high already, but if we are making new runtimes we don't want to be dropping any
                        int localQueue = queueTicks.incrementAndGet();
                        if (localQueue > queueThreashold) {
                            queueTicks.set(0);
                            int localIdle = maximumIdleObjects.incrementAndGet();
                            if (localIdle > currentlyActiveObjects) {
                                maximumIdleObjects.set(currentlyActiveObjects);
                            }
                        }
                    }
                    voteNewObject(); // Vote for more objects to be created
                    gotten = queue.poll(DEFAULT_TIMEOUT, TimeUnit.SECONDS); // Block until an object becomes available. Theoretically, this would time out after 6 minutes.
                    if (fineLog) {
                        // Log pool miss if we are at FINE log or lower
                        long waitTime = System.currentTimeMillis() - time;
                        SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_RECEIVED_NEW_OBJECT_VOTED,
                                waitTime, currentlyActiveObjects, hardMaxActiveObjects, queue.size(),
                                (currentlyActiveObjects - queue.size())));
                    }
                    return gotten;
                }
            } catch (InterruptedException e) {
                throw new RuntimeException(e); // Following old InterruptedException behavior: if someone interrupts us, things are going pear-shaped fairly quickly
            }
        } else {
            // old non-async behavior
            return queue.poll();
        }
    }

    /**
     * Returns object to the object pool.
     *
     * @param object - object to be returned after use
     */
    public void returnObject(T object) {
        // First, check to see if we should bother returning this object to the queue
        if (queue.size() < maximumIdleObjects.intValue()) {
            // Queue is less than max idle, return without complaint
            validateAndReturn(object); // Check the object for validity, then return
            if (dynamic) {
                int localDown = downTicks.decrementAndGet(); // We returned an object under the queue size, so we don't need to think as much about dropping runtimes
                if (localDown < 0) {
                    downTicks.set(0);
                }
                if (fineLog) {
                    // Log successful return of an object to the pool if we are at FINE or lower
                    SelectorThread.logger().log(Level.INFO, Messages.format(Messages.DYNAMICPOOL_RETURNED_OBJECT,
                            currentlyActiveObjects, hardMaxActiveObjects, queue.size(), currentlyActiveObjects - queue.size()));
                }
            }
        } else {
            // The queue is already too large
            if (dynamic) {
                int localDown = downTicks.incrementAndGet();   // Think about dropping the object
                int localQueue = queueTicks.decrementAndGet(); // Think about reducing the queue size
                if (localDown > downThreashold.get()) {
                    // If localDown > downThreashold, we have clearance to drop a runtime
                    downTicks.set(0);
                    if (currentlyActiveObjects > hardMinActiveObjects) {
                        // We are above the hard minimum, and thus we can drop an object
                        currentlyActiveObjects--;  // reduce the number of active objects
                        objectLib.dispose(object); // request any cleanup that might need to happen
                        if (fineLog) {
                            // Log removal of object
                            SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_EXCESSIVE_OBJECTS,
                                    currentlyActiveObjects, hardMaxActiveObjects, queue.size(), currentlyActiveObjects - queue.size()));
                        }
                        // Think about our pool size
                        if (createdLast) {
                            // We created an object, then dropped one. That's bad, and means we should be less willing to create objects in the future
                            newThreashold.incrementAndGet();
                            SelectorThread.logger().log(Level.INFO, "Create-drop with RT = " + currentlyActiveObjects
                                    + ". newThreashold is " + newThreashold.get() + ", downThreashold is " + downThreashold);
                        } else {
                            // We dropped an object, then dropped another one. We should be more willing to drop objects in the future
                            downThreashold.decrementAndGet();
                            SelectorThread.logger().log(Level.INFO, "drop-drop with RT = " + currentlyActiveObjects
                                    + ". newThreashold is " + newThreashold.get() + ", downThreashold is " + downThreashold);
                        }
                        // Either way, we just dropped an object. Set the last property, drop successful requests to 0 to prevent staticizing
                        createdLast = false;
                        successfulRequests.set(0);
                        // We aren't as static as we thought we were. Reduce the number of successful requests required to be "static" to prevent calcification
                        successThreashold.set((int) (successThreashold.get() * failureGrowthPercentage));
                    } else {
                        // Dynamic pool thinks that the object should be dropped, but we are already at hardMin
                        if (fineLog) {
                            // Grumble about things being fine until the users get involved in the logs
                            SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_RETURNED_HARD_MINIMUM,
                                    currentlyActiveObjects, hardMaxActiveObjects, queue.size(), currentlyActiveObjects - queue.size()));
                        }
                        validateAndReturn(object); // Return the object anyway
                    }
                } else {
                    // Queue was too large, but downTicks hasn't reached the threshold yet
                    if (fineLog) {
                        // Log our intent to drop an object in the future
                        SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_RETURNED_OBJECT_REDUCTION,
                                currentlyActiveObjects, hardMaxActiveObjects, queue.size(), currentlyActiveObjects - queue.size()));
                    }
                    validateAndReturn(object); // Return the object
                }
                if (localQueue < -queueThreashold && maximumIdleObjects.intValue() > 0) {
                    queueTicks.set(0);
                    int localIdle = maximumIdleObjects.decrementAndGet(); // Reduce the number of runtimes we are willing to hold around
                    if (localIdle < 1) {
                        maximumIdleObjects.set(1);
                    }
                }
            } else {
                if (currentlyActiveObjects > hardMaxActiveObjects) {
                    // This is outside dynamic, and thus shouldn't ever really happen.
                    // This is "The queue is too long, there are too many active objects, and dynamic is off"
                    currentlyActiveObjects--; // That should never happen, but with dynamic off it wouldn't be corrected if it did, for example by calling start() twice
                    objectLib.dispose(object);
                } else {
                    validateAndReturn(object); // If we don't have too many objects, we wonder how the queue can be too long without too many active objects and re-add the object
                }
            }
        }
    }

    /**
     * Make sure that returned objects pass their fitness tests, and then return them to the pool if they do.
     * If they don't, clean them up and request a new object.
     * @param object the object to be returned
     */
    private void validateAndReturn(T object) {
        if (validate) { // Only check validation if we've been told that we need to
            if (objectLib.validate(object)) {
                queue.offer(object); // If the object passes validation, return it to the queue
            } else {
                // invalid object returned!
                currentlyActiveObjects--; // One less active object, since this isn't going to make it into the pool
                objectLib.dispose(object);
                makeNewObject(); // Make a new object to replace it. This is a direct call to makeNewObject(), so it will bypass newTicks, but not maxGenerating
            }
        } else {
            queue.offer(object); // If we don't need to do validation, just return the object.
        }
    }

    /**
     * Starts the object pool. Calling this multiple times on the same object pool will cause undefined, and probably bad, behavior.
     * More specifically, the start method doesn't pay attention to hardMax, which means that two calls to start may create activeObjects > hardMax, which breaks some of the pool's assumptions.
     * @param threads the number of threads to start for object generation.
     *        Each thread will attempt to initialize one object at a time until all objects are initialized.
     */
    public void start(int threads) {
        try {
            ExecutorService exec = Executors.newFixedThreadPool(threads);
            SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_STARTING_THREADPOOL, threads));
            for (int i = 0; i < numberOfObjects; i++) {
                // Submit tasks to the executor
                currentlyActiveObjects++;
                exec.execute(new Runnable() {
                    public void run() {
                        long startTime = System.currentTimeMillis();
                        T newObject = objectLib.initializeObject(); // Initialize the object according to the library
                        SelectorThread.logger().log(Level.INFO, Messages.format(Messages.DYNAMICPOOL_NEWINSTANCE_CREATION_TIME,
                                System.currentTimeMillis() - startTime));
                        queue.offer(newObject); // Add it to the queue
                    }
                });
            }
            SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_SHUTDOWN)); // Announce successful submission of the tasks
            exec.shutdown();
            if (exec.awaitTermination(numberOfObjects * 30, TimeUnit.SECONDS)) {
                // Wait up to 30 seconds per runtime for the executor to finish starting the objects
                SelectorThread.logger().log(Level.FINE, Messages.format(Messages.DYNAMICPOOL_INIT_FINISHED)); // Announce successful initialization
            } else {
                // Things are taking "too long": complain to the user if we might be hung
                if (numberOfObjects > 1 && threads > 1) {
                    // As far as we know, JRuby can only hang if we are starting two or more instances at a time
                    SelectorThread.logger().log(Level.SEVERE, Messages.format(Messages.DYNAMICPOOL_INIT_ERR, currentlyActiveObjects, numberOfObjects));
                }
                exec.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); // Wait forever. Not actually forever, but noticeably longer than the current age of the universe.
            }
            /* Commented out until we decide that the watcher should be implemented
            // Start watcher thread. This solves the problem of not decreasing the pool while there is no load.
            // This fix will decrease it very slowly (one runtime every two minutes) if there is no load.
            lastRequest.set(System.currentTimeMillis());
            objectGenerator.execute(new Runnable() {
                public void run() {
                    while (!objectGenerator.isShutdown()) {
                        try {
                            // While the pool is still running
                            Thread.sleep(1200);
                            long time = System.currentTimeMillis();
                            if (lastRequest.get() - time > 1000) {
                                // It's been more than a second since the last request
                                if (queue.size() > hardMinActiveObjects) {
                                    // If there is the potential to drop a runtime, simulate a request
                                    T temp = borrowObject();
                                    returnObject(temp);
                                }
                            }
                        } catch (InterruptedException e) {
                            SelectorThread.logger().log(Level.INFO, "Pool watcher thread interrupted. This should only happen on shutdown");
                        }
                    }
                }
            });
            */
        } catch (InterruptedException e) {
            // Interrupting us is a bad thing. We don't know how many have completed, and don't have a way to ask the executor.
            // Make a guess, warn the user, and pass the interruption up.
            currentlyActiveObjects = queue.size();
            SelectorThread.logger().log(Level.WARNING, Messages.format(Messages.DYNAMICPOOL_INIT_INTERRUPTED));
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Vote for the creation of a new object, to be called when we run out of objects in the queue.
     */
    private void voteNewObject() {
        if ((currentlyActiveObjects < hardMaxActiveObjects) && (currentlyGeneratingObjects < maxGeneratingObjects)) {
            int localNew = newTicks.addAndGet(2); // If we have room for another runtime, and we aren't already making one, tick up
            if (localNew > newThreashold.get()) {
                // If we are past the threshold, make a new runtime
                newTicks.set(0);
                makeNewObject();
            }
        }
        if (currentlyActiveObjects < hardMinActiveObjects) {
            // If we are under hard min (for example, because several objects failed validation), make a new one
            makeNewObject();
        }
    }

    /**
     * Creates a new object, to be called when we are sure that we want a new object.
     */
    private void makeNewObject() {
        if ((currentlyActiveObjects < hardMaxActiveObjects) && (currentlyGeneratingObjects < maxGeneratingObjects)) {
            // Check to make sure that we aren't exceeding any limits
            currentlyActiveObjects++;
            currentlyGeneratingObjects++;
            objectGenerator.submit(new Runnable() {
                // Submit a new object creation job. This is essentially the same code as the start() method
                public void run() {
                    try {
                        long startTime = System.currentTimeMillis();
                        T newObject = objectLib.initializeObject();
                        SelectorThread.logger().log(Level.INFO, Messages.format(Messages.DYNAMICPOOL_NEW_INSTANCE,
                                currentlyActiveObjects, System.currentTimeMillis() - startTime));
                        queue.offer(newObject);
                    } catch (Exception e) {
                        // We need to be reasonably careful about exceptions here, since we are executing user code, and that can fail in all sorts of spectacular ways.
                        currentlyActiveObjects--; // If object creation fails, we didn't get an object.
                    } finally {
                        currentlyGeneratingObjects--; // In all cases, decrement our generation count when we finish
                    }
                }
            });
            // Think about our pool size
            if (createdLast) {
                // We created an object, then created another one. We should be more willing to create objects in the future
                newThreashold.decrementAndGet();
                SelectorThread.logger().log(Level.INFO, "Create-create with RT = " + currentlyActiveObjects
                        + ". newThreashold is " + newThreashold.get() + ", downThreashold is " + downThreashold);
            } else {
                // We dropped an object, then created another one. We should be less willing to drop objects in the future
                downThreashold.incrementAndGet();
                SelectorThread.logger().log(Level.INFO, "Drop-create with RT = " + currentlyActiveObjects
                        + ". newThreashold is " + newThreashold.get() + ", downThreashold is " + downThreashold);
            }
            // Either way, we just created an object. Set successfulRequests to 0 to prevent staticizing
            createdLast = true;
            successfulRequests.set(0);
            // We aren't as static as we thought we were. Reduce the number of successful requests required to be "static" to prevent calcification
            successThreashold.set((int) (successThreashold.get() * failureGrowthPercentage));
        }
    }

    /**
     * Shuts down the object pool.
     */
    public void stop() {
        for (T thing : queue) {
            // dispose of each of the queued objects
            objectLib.dispose(thing);
        }
        queue.clear(); // delete them all
        try {
            // Stop our object-creation thread
            objectGenerator.shutdown();
            objectGenerator.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            SelectorThread.logger().log(Level.WARNING, Messages.format(Messages.DYNAMICPOOL_SHUTDOWN_INTERRUPTED));
        }
    }

    /*
     * Public accessor methods for active objects, the queue, and async-enabled.
     */
    public int getNumberOfObjects() {
        return currentlyActiveObjects;
    }

    public BlockingQueue<T> getObjectQueue() {
        return queue;
    }

    public boolean isAsyncEnabled() {
        return asyncEnabled;
    }

    private void logDynamicStatus() {
        // logs the min, max, etc. values of the dynamic pool on startup
        if (dynamic) {
            SelectorThread.logger().log(Level.INFO, Messages.format(Messages.DYNAMICPOOL_STATUS,
                    numberOfObjects, hardMinActiveObjects, hardMaxActiveObjects));
        } else {
            SelectorThread.logger().log(Level.INFO, Messages.format(Messages.DYNAMICPOOL_DISABLED, numberOfObjects));
        }
    }
}
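For orientation, the sketch below shows how a caller might wire this pool up. It is illustrative only and not part of the Grizzly sources: StringBuilderAdapter is a hypothetical adapter, PoolAdapter<T> is assumed to be an interface exposing the three operations this file actually invokes (initializeObject(), validate(T), dispose(T)), and the DynamicPoolConfig is assumed to be populated by the caller, since its setters do not appear in this file.

// Hypothetical adapter, assuming PoolAdapter<T> is an interface with exactly the
// three operations DynamicPool calls: initializeObject(), validate(T), dispose(T).
public class StringBuilderAdapter implements PoolAdapter<StringBuilder> {

    public StringBuilder initializeObject() {
        return new StringBuilder(1024);          // any expensive per-object setup would go here
    }

    public boolean validate(StringBuilder sb) {
        return sb != null;                       // cheap fitness check run before re-queueing (when validation is enabled)
    }

    public void dispose(StringBuilder sb) {
        sb.setLength(0);                         // release whatever the pooled object holds
    }

    // Illustrative borrow/return cycle. The config is assumed to be populated elsewhere.
    static void demo(DynamicPoolConfig config) {
        DynamicPool<StringBuilder> pool = new DynamicPool<StringBuilder>(new StringBuilderAdapter(), config);
        pool.start(1);                           // build the initial objects on a single thread
        StringBuilder sb = pool.borrowObject();  // may block (up to DEFAULT_TIMEOUT seconds) when the pool is saturated
        try {
            sb.append("work");                   // use the pooled object
        } finally {
            pool.returnObject(sb);               // always return it so the resizing statistics stay accurate
        }
        pool.stop();                             // dispose of queued objects and shut the generator down
    }
}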



