/*
* The contents of this file are subject to the terms
* of the Common Development and Distribution License
* (the "License"). You may not use this file except
* in compliance with the License.
*
* You can obtain a copy of the license at
* glassfish/bootstrap/legal/CDDLv1.0.txt or
* https://glassfish.dev.java.net/public/CDDLv1.0.html.
* See the License for the specific language governing
* permissions and limitations under the License.
*
* When distributing Covered Code, include this CDDL
* HEADER in each file and include the License file at
* glassfish/bootstrap/legal/CDDLv1.0.txt. If applicable,
* add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your
* own identifying information: Portions Copyright [yyyy]
* [name of copyright owner]
*/
// Copyright (c) 1998, 2005, Oracle. All rights reserved.
package oracle.toplink.essentials.internal.identitymaps;
import java.util.*;
/**
* Purpose: A WeakIdentityMap holds all objects referenced by the application only.
* The weak identity map is similar to the full identity map except that it allows
* full garbage collection.
*
* Responsibilities:
* - Guarantees identity
* - Allows garbage collection
*
* @since TOPLink/Java 1.0
*/
public class WeakIdentityMap extends FullIdentityMap {
/** Keep track of a counter to amortize cleanup of dead cache keys */
protected int cleanupCount;
/** PERF: Keep track of a cleanup size to avoid cleanup bottleneck for large caches. */
protected int cleanupSize;
public WeakIdentityMap(int size) {
super(size);
this.cleanupCount = 0;
this.cleanupSize = size;
}
/**
* Search for any cache keys that have been garbage collected and remove them.
* This must be done because although the objects held by the cache keys will be garbage collected,
* the keys themselves will not and must be cleaned up. This is a linear operation, so
* it is amortized through the cleanupCount to occur only once per cleanupSize puts, keeping the
* average cost per put constant.
*/
protected void cleanupDeadCacheKeys() {
for (Enumeration keysEnum = getCacheKeys().elements(); keysEnum.hasMoreElements();) {
CacheKey key = (CacheKey)keysEnum.nextElement();
if (key.getObject() == null) {
// Check lock first.
//Change for CR 2317
if (key.acquireNoWait()) {
try {
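// Re-check under the lock: another thread may have re-populated the key with a live object.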
if (key.getObject() == null) {
getCacheKeys().remove(key);
}
} finally {
key.release();
}
}
//change complete CR 2317
}
}
}
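/**
 * Create a cache key that holds its object with a weak reference so the object can be garbage collected.
 */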
public CacheKey createCacheKey(Vector primaryKey, Object object, Object writeLockValue, long readTime) {
return new WeakCacheKey(primaryKey, object, writeLockValue, readTime);
}
/**
* Used to amortize the cleanup of dead cache keys.
*/
protected int getCleanupCount() {
return cleanupCount;
}
protected void setCleanupCount(int cleanupCount) {
this.cleanupCount = cleanupCount;
}
/**
* Used to amortize the cleanup of dead cache keys.
*/
protected int getCleanupSize() {
return cleanupSize;
}
protected void setCleanupSize(int cleanupSize) {
this.cleanupSize = cleanupSize;
}
/**
* Store the object in the cache with the cache key.
*/
protected void put(CacheKey cacheKey) {
//CR3712 Add the method back.
synchronized (this) {
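// The counter and cleanup threshold are guarded by the map's monitor so concurrent puts
// cannot run overlapping cleanups or lose counter updates.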
if (getCleanupCount() > getCleanupSize()) {
cleanupDeadCacheKeys();
setCleanupCount(0);
// PERF: Avoid cleanup bottleneck for large cache sizes, increase next cleanup.
if (getSize() > getCleanupSize()) {
setCleanupSize(getSize());
}
}
setCleanupCount(getCleanupCount() + 1);
}
super.put(cacheKey);
}
}
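// Illustrative usage sketch (not part of the original source). The class and method names below
// are hypothetical; the sketch exercises only the API visible in this file: the WeakIdentityMap(int)
// constructor, createCacheKey(...), and put(CacheKey). Because each WeakCacheKey holds its object
// weakly, a cached object can be garbage collected once the application no longer references it,
// after which cleanupDeadCacheKeys() eventually removes the dead key.
class WeakIdentityMapUsageSketch {
    static void demo() {
        WeakIdentityMap map = new WeakIdentityMap(100);

        Vector primaryKey = new Vector();
        primaryKey.add(Integer.valueOf(1));

        Object cachedObject = new Object(); // stands in for a persistent object
        CacheKey key = map.createCacheKey(primaryKey, cachedObject, null, 0);
        map.put(key);

        // While 'cachedObject' is strongly referenced, key.getObject() returns the same instance
        // (identity is guaranteed). Once the application drops the reference and a garbage
        // collection runs, key.getObject() may return null and the dead key becomes a candidate
        // for removal during a later put.
    }
}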