
com.carrotsearch.hppc.IntObjectWormMap Maven / Gradle / Ivy

High Performance Primitive Collections: data structures (maps, sets, lists, stacks, queues) generated for combinations of object and primitive types to conserve JVM memory and speed up execution.
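Before the source listing, a minimal usage sketch may help orient the reader. It is based only on the API visible in the listing below (put, putIfAbsent, get, getOrDefault, containsKey, remove, size, cursor iteration); the example class name and the sample values are illustrative, and it assumes an HPPC artifact containing this class is on the classpath.

import com.carrotsearch.hppc.IntObjectWormMap;
import com.carrotsearch.hppc.cursors.IntObjectCursor;

public class WormMapExample {
  public static void main(String[] args) {
    // Map from primitive int keys to String values; keys are never boxed.
    IntObjectWormMap<String> map = new IntObjectWormMap<>(16);
    map.put(1, "one");
    map.put(2, "two");
    map.putIfAbsent(2, "TWO");               // no effect: key 2 is already present

    System.out.println(map.get(1));          // "one"
    System.out.println(map.getOrDefault(3, "none"));
    System.out.println(map.containsKey(2));  // true

    // Cursor-based iteration; the iteration order deliberately varies between iterations.
    for (IntObjectCursor<String> c : map) {
      System.out.println(c.key + " => " + c.value);
    }

    map.remove(1);
    System.out.println(map.size());          // 1
  }
}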

package com.carrotsearch.hppc;

import java.util.*;

import com.carrotsearch.hppc.cursors.*;
import com.carrotsearch.hppc.predicates.*;
import com.carrotsearch.hppc.procedures.*;

import static com.carrotsearch.hppc.HashContainers.*;
import static com.carrotsearch.hppc.Containers.*;
import static com.carrotsearch.hppc.WormUtil.*;

/**
 * A hash map of {@code int} to {@code Object}, implemented using the Worm Hashing strategy.
 *
 * <p>This strategy is appropriate for a medium-sized map (less than 2M entries). It takes more time
 * to put entries in the map because it maintains chains of entries having the same hash. In return,
 * lookup speed is fast even if the map is heavily loaded or hashes are clustered. On average it takes
 * slightly less memory than {@link IntObjectHashMap}: each entry is slightly heavier, but the load factor
 * is higher (it varies around 80%) so the map enlarges later.</p>
 *
 * @see HPPC interfaces diagram
 */
@SuppressWarnings("unchecked")
@com.carrotsearch.hppc.Generated(
    date = "2021-06-08T13:12:55+0200",
    value = "KTypeVTypeWormMap.java")
public class IntObjectWormMap<VType>
    implements IntObjectMap<VType>, Preallocable, Cloneable, Accountable {

  /** The array holding keys. */
  public int[] keys;

  /** The array holding values. */
  public Object[] values;

  /**
   * {@code abs(next[i])=offset} to the next chained entry index.
   *
   * <p>{@code next[i]=0} for a free bucket.</p>
   *
   * <p>The offset is always forward, and the array is considered circular, meaning that an entry at the end
   * of the array may point to an entry at the beginning with a positive offset.</p>
   *
   * <p>The sign of the offset encodes head/tail of chain: {@link #next}[i] &gt; 0 for the first head-of-chain
   * entry (within [1, {@link WormUtil#maxOffset}]), {@link #next}[i] &lt; 0 for the subsequent tail-of-chain
   * entries (within [-{@link WormUtil#maxOffset}, -1]). For the last entry in the chain,
   * {@code abs(next[i])=}{@link WormUtil#END_OF_CHAIN}.</p>
   */
  public byte[] next;

  /** Map size (number of entries). */
  protected int size;

  /** Seed used to ensure the hash iteration order differs from one iteration to another. */
  protected int iterationSeed;

  /** New instance with sane defaults. */
  public IntObjectWormMap() {
    this(DEFAULT_EXPECTED_ELEMENTS);
  }

  /**
   * New instance with the provided defaults.
   *
   * <p>There is no load factor parameter as this map enlarges automatically. In practice the load factor
   * varies around 80% (between 75% and 90%). The load factor is 100% for tiny maps.</p>
* * @param expectedElements The expected number of elements. The capacity of the map is calculated based on it. */ public IntObjectWormMap(int expectedElements) { if (expectedElements < 0) { throw new IllegalArgumentException("Invalid expectedElements=" + expectedElements); } iterationSeed = HashContainers.nextIterationSeed(); ensureCapacity(expectedElements); } /** * Creates a new instance from all key-value pairs of another container. */ public IntObjectWormMap(IntObjectAssociativeContainer container) { this(container.size()); putAll(container); } /** * Creates a new instance from two index-aligned arrays of key-value pairs. */ public static IntObjectWormMap from(int[] keys, VType[] values) { if (keys.length != values.length) { throw new IllegalArgumentException("Arrays of keys and values must have an identical length."); } IntObjectWormMap map = new IntObjectWormMap<>(keys.length); for (int i = 0; i < keys.length; i++) { map.put(keys[i], values[i]); } return map; } /** * Clones this map. The cloning operation is efficient because it copies directly the internal arrays, without * having to put entries in the cloned map. The cloned map has the same entries and the same capacity as this map. * * @return A shallow copy of this map. */ @Override public IntObjectWormMap clone() { try { /* */ IntObjectWormMap cloneMap = (IntObjectWormMap) super.clone(); cloneMap.keys = keys.clone(); cloneMap.values = values.clone(); cloneMap.next = next.clone(); cloneMap.iterationSeed = HashContainers.nextIterationSeed(); return cloneMap; } catch (CloneNotSupportedException e) { throw new RuntimeException(e); } } /** * The value returned when there is no value associated to a key in this map. * This method can be extended to change it. */ public VType noValue() { return null; } /** {@inheritDoc} */ @Override public int size() { return size; } /** {@inheritDoc} */ @Override public boolean isEmpty() { return size == 0; } /** {@inheritDoc} */ @Override public VType get(int key) { // Compute the key hash index. int hashIndex = hashMod(key); int nextOffset = next[hashIndex]; if (nextOffset <= 0) { // The bucket is either free, or only used for chaining, so no entry for the key. return noValue(); } // The bucket contains a head-of-chain entry. // Look for the key in the chain. int entryIndex = searchInChain(key, hashIndex, nextOffset); // Return the value if an entry in the chain matches the key. return entryIndex < 0 ? noValue() : (VType) values[entryIndex]; } /** {@inheritDoc} */ @Override public VType getOrDefault(int key, VType defaultValue) { VType value; return (value = get(key)) == noValue() ? defaultValue : value; } /** {@inheritDoc} */ @Override public VType put(int key, VType value) { return put(key, value, PutPolicy.NEW_OR_REPLACE, true); } /** {@inheritDoc} */ @Override public int putAll(IntObjectAssociativeContainer container) { final int initialSize = size(); for (IntObjectCursor c : container) { put(c.key, c.value); } return size() - initialSize; } /** {@inheritDoc} */ @Override public int putAll(Iterable> iterable) { final int initialSize = size(); for (IntObjectCursor c : iterable) { put(c.key, c.value); } return size() - initialSize; } /** * @param key The key of the value to check. * @param value The value to put if key does not exist. * @return true if key did not exist and value was placed in the map. 
*/ public boolean putIfAbsent(int key, VType value) { return noValue() == put(key, value, PutPolicy.NEW_ONLY_IF_ABSENT, true); } /** {@inheritDoc} */ @Override public VType remove(int key) { final byte[] next = this.next; // Compute the key hash index. int hashIndex = hashMod(key); int nextOffset = next[hashIndex]; if (nextOffset <= 0) { // The bucket is either free, or in tail-of-chain, so no entry for the key. return noValue(); } // The bucket contains a head-of-chain entry. // Look for the key in the chain. int previousEntryIndex = searchInChainReturnPrevious(key, hashIndex, nextOffset); if (previousEntryIndex < 0) { // No entry matches the key. return noValue(); } int entryToRemoveIndex = previousEntryIndex == Integer.MAX_VALUE ? hashIndex : addOffset(previousEntryIndex, Math.abs(next[previousEntryIndex]), next.length); return remove(entryToRemoveIndex, previousEntryIndex); } /** {@inheritDoc} */ @Override public int removeAll(IntContainer other) { // Try to iterate over the smaller set of values // or over the container that isn't implementing efficient contains() lookup. int size = size(); if (other.size() >= size && other instanceof IntLookupContainer) { final int[] keys = this.keys; final byte[] next = this.next; final int capacity = next.length; int entryIndex = 0; while (entryIndex < capacity) { int key; if (next[entryIndex] != 0 && other.contains(key = keys[entryIndex])) { this.remove(key); } else { entryIndex++; } } } else { for (IntCursor c : other) { remove( c.value); } } return size - size(); } /** {@inheritDoc} */ @Override public int removeAll(IntPredicate predicate) { final int[] keys = this.keys; final byte[] next = this.next; final int capacity = next.length; int size = size(); int entryIndex = 0; while (entryIndex < capacity) { int key; if (next[entryIndex] != 0 && predicate.apply(key = keys[entryIndex])) { this.remove(key); } else { entryIndex++; } } return size - size(); } /** {@inheritDoc} */ @Override public int removeAll(IntObjectPredicate predicate) { final int[] keys = this.keys; final VType[] values = (VType[]) this.values; final byte[] next = this.next; final int capacity = next.length; int size = size(); int entryIndex = 0; while (entryIndex < capacity) { int key; if (next[entryIndex] != 0 && predicate.apply(key = keys[entryIndex], values[entryIndex])) { this.remove(key); } else { entryIndex++; } } return size - size(); } /** {@inheritDoc} */ @Override public > T forEach(T procedure) { final int[] keys = this.keys; final VType[] values = (VType[]) this.values; final byte[] next = this.next; int seed = nextIterationSeed(); int inc = iterationIncrement(seed); for (int i = 0, mask = next.length - 1, slot = seed & mask; i <= mask; i++, slot = (slot + inc) & mask) { if (next[slot] != 0) { procedure.apply(keys[slot], values[slot]); } } return procedure; } /** {@inheritDoc} */ @Override public > T forEach(T predicate) { final int[] keys = this.keys; final VType[] values = (VType[]) this.values; final byte[] next = this.next; int seed = nextIterationSeed(); int inc = iterationIncrement(seed); for (int i = 0, mask = next.length - 1, slot = seed & mask; i <= mask; i++, slot = (slot + inc) & mask) { if (next[slot] != 0) { if (!predicate.apply(keys[slot], values[slot])) { break; } } } return predicate; } /** {@inheritDoc} */ @Override public KeysContainer keys() { return new KeysContainer(); } /** {@inheritDoc} */ @Override public ObjectCollection values() { return new ValuesContainer(); } /** {@inheritDoc} */ @Override public Iterator> iterator() { return new 
EntryIterator(); } /** {@inheritDoc} */ @Override public boolean containsKey(int key) { int hashIndex = hashMod(key); int nextOffset = next[hashIndex]; if (nextOffset <= 0) { return false; } return searchInChain(key, hashIndex, nextOffset) >= 0; } /** {@inheritDoc} */ @Override public void clear() { Arrays.fill(next, (byte) 0); size = 0; /* */ /* */ Arrays.fill(values, noValue()); /* */ } /** {@inheritDoc} */ @Override public void release() { keys = null; values = null; next = null; size = 0; ensureCapacity(DEFAULT_EXPECTED_ELEMENTS); } /** {@inheritDoc} */ @Override @SuppressWarnings("unchecked") public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } IntObjectMap map = (IntObjectMap) o; final int size = this.size; if (size != map.size()) { return false; } final int[] keys = this.keys; final VType[] values = (VType[]) this.values; final byte[] next = this.next; // Iterate all entries. for (int index = 0, entryCount = 0; entryCount < size; index++) { if (next[index] != 0) { if (!java.util.Objects.equals(map.get(keys[index]), values[index])) { return false; } entryCount++; } } return true; } /** {@inheritDoc} */ @Override public int hashCode() { int hashCode = 0; // Iterate all entries. final int size = this.size; for (int index = 0, entryCount = 0; entryCount < size; index++) { if (next[index] != 0) { hashCode += BitMixer.mixPhi(keys[index]) ^ BitMixer.mixPhi(values[index]); entryCount++; } } return hashCode; } protected int hashKey(int key) { return BitMixer.mixPhi(key); } private int hashMod(int key) { return hashKey(key) & (next.length - 1); } /** {@inheritDoc} */ @Override public int indexOf(int key) { int hashIndex = hashMod(key); int nextOffset = next[hashIndex]; if (nextOffset <= 0) { return ~hashIndex; } return searchInChain(key, hashIndex, nextOffset); } /** {@inheritDoc} */ @Override public boolean indexExists(int index) { assert index < next.length; return index >= 0; } /** {@inheritDoc} */ @Override public VType indexGet(int index) { assert checkIndex(index, next.length); assert next[index] != 0; return (VType) values[index]; } /** {@inheritDoc} */ @Override public VType indexReplace(int index, VType newValue) { assert checkIndex(index, next.length); assert next[index] != 0; VType previousValue = (VType) values[index]; values[index] = newValue; return previousValue; } /** {@inheritDoc} */ @Override public void indexInsert(int index, int key, VType value) { assert index < 0 : "The index must not point at an existing key."; index = ~index; if (next[index] == 0) { keys[index] = key; values[index] = value; next[index] = END_OF_CHAIN; size++; } else { put(key, value, PutPolicy.NEW_GUARANTEED, true); } } /** {@inheritDoc} */ @Override public VType indexRemove(int index) { assert checkIndex(index, next.length); assert next[index] != 0; return remove(index, Integer.MAX_VALUE); } /** {@inheritDoc} */ @Override public String toString() { StringBuilder sBuilder = new StringBuilder(); sBuilder.append('['); // Iterate all entries. 
for (int index = 0, entryCount = 0; entryCount < size; index++) { if (next[index] != 0) { if (entryCount > 0) { sBuilder.append(", "); } sBuilder.append(keys[index]); sBuilder.append("=>"); sBuilder.append(values[index]); entryCount++; } } sBuilder.append(']'); return sBuilder.toString(); } /** {@inheritDoc} */ @Override public void ensureCapacity(int expectedElements) { allocateBuffers((int) (expectedElements / FIT_LOAD_FACTOR)); } /** {@inheritDoc} */ @Override public String visualizeKeyDistribution(int characters) { return IntBufferVisualizer.visualizeKeyDistribution(keys, next.length - 1, characters); } /** {@inheritDoc} */ @Override public long ramBytesAllocated() { // int: size, iterationSeed return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Integer.BYTES * 2 + RamUsageEstimator.shallowSizeOfArray(keys) + RamUsageEstimator.shallowSizeOfArray(values) + RamUsageEstimator.shallowSizeOfArray(next); } /** {@inheritDoc} */ @Override public long ramBytesUsed() { // int: size, iterationSeed return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Integer.BYTES * 2 + RamUsageEstimator.shallowUsedSizeOfArray(keys, size()) + RamUsageEstimator.shallowUsedSizeOfArray(values, size()) + RamUsageEstimator.shallowUsedSizeOfArray(next, size()); } protected void allocateBuffers(int capacity) { capacity = Math.max(capacity, size); capacity = Math.max(BitUtil.nextHighestPowerOfTwo(capacity), MIN_HASH_ARRAY_LENGTH); if (capacity > MAX_HASH_ARRAY_LENGTH) { throw new BufferAllocationException("Maximum array size exceeded (capacity: %d)", capacity); } if (keys != null && keys.length == capacity) { return; } int[] oldKeys = keys; VType[] oldValues = (VType[]) values; byte[] oldNext = next; keys = (new int [capacity]); values = ((VType[]) new Object [capacity]); next = new byte[capacity]; if (oldKeys != null) { putOldEntries(oldKeys, oldValues, oldNext, size); } } /** * Puts old entries after enlarging this map. Old entries are guaranteed not to be already contained by this map. *

This method does not modify this map's {@link #size}. It may enlarge this map if it needs room to put the entry.

* * @param oldKeys The old keys. * @param oldValues The old values. * @param oldNext The old next offsets. * @param entryNum The number of non null old entries. It is supported to set a value larger than the real count. */ private void putOldEntries(int[] oldKeys, VType[] oldValues, byte[] oldNext, int entryNum) { int entryCount = 0; // Iterate new entries. // The condition on index < endIndex is required because the putNewEntry() call below may need to // enlarge the map, which calls this method again. And in this case entryNum is larger than the real number. for (int index = 0, endIndex = oldNext.length; entryCount < entryNum && index < endIndex; index++) { if (oldNext[index] != 0) { // Compute the key hash index. int oldKey = oldKeys[index]; int hashIndex = hashMod(oldKey); putNewEntry(hashIndex, next[hashIndex], oldKey, oldValues[index]); entryCount++; } } } /** * Puts an entry in this map. * * @param sizeIncrease Whether to increment {@link #size}. * @return The previous entry value (exact {@code requiredPreviousValue} reference if it matches); * or {@link #noValue()} if there was no previous entry. * @see #put(int, Object) */ private VType put(int key, VType value, PutPolicy policy, boolean sizeIncrease) { // Compute the key hash index. int hashIndex = hashMod(key); int nextOffset = next[hashIndex]; boolean added = false; if (nextOffset > 0 && policy != PutPolicy.NEW_GUARANTEED) { // The bucket contains a head-of-chain entry. // Look for the key in the chain. int entryIndex = searchInChain(key, hashIndex, nextOffset); if (entryIndex >= 0) { // An entry in the chain matches the key. Replace the value and return the previous one. VType previousValue = (VType) values[entryIndex]; if (policy != PutPolicy.NEW_ONLY_IF_ABSENT) { values[entryIndex] = value; } return previousValue; } if (enlargeIfNeeded()) { hashIndex = hashMod(key); nextOffset = next[hashIndex]; } else { // No entry matches the key. Append the new entry at the tail of the chain. // ~entryIndex is the index of the last entry in the chain. if (!appendTailOfChain(~entryIndex, key, value)) { // No free bucket in the range. Enlarge the map and put again. enlargeAndPutNewEntry(key, value); } added = true; } } else if (enlargeIfNeeded()) { hashIndex = hashMod(key); nextOffset = next[hashIndex]; } if (!added) { // No entry matches the key. Add the new entry. putNewEntry(hashIndex, nextOffset, key, value); } if (sizeIncrease) { size++; } return noValue(); } private boolean enlargeIfNeeded() { if (size >= next.length) { allocateBuffers(next.length << 1); return true; } return false; } private void enlargeAndPutNewEntry(int key, VType value) { allocateBuffers(next.length << 1); put(key, value, PutPolicy.NEW_GUARANTEED, false); } /** * Removes the entry at the specified removal index. * Decrements {@link #size}. * * @param entryToRemoveIndex The index of the entry to remove. * @param previousEntryIndex The index of the entry in the chain preceding the entry to remove; or * {@link Integer#MAX_VALUE} if unknown or if the entry to remove is the head-of-chain. * @return The value of the removed entry. */ private VType remove(int entryToRemoveIndex, int previousEntryIndex) { assert checkIndex(entryToRemoveIndex, next.length); assert previousEntryIndex == Integer.MAX_VALUE || checkIndex(previousEntryIndex, next.length); final byte[] next = this.next; VType previousValue = (VType) values[entryToRemoveIndex]; // Find the last entry of the chain. // Replace the removed entry by the last entry of the chain. 
int nextOffset = next[entryToRemoveIndex]; int beforeLastIndex = findLastOfChain(entryToRemoveIndex, nextOffset, true, next); int lastIndex; if (beforeLastIndex == -1) { // The entry to remove is the last of the chain. lastIndex = entryToRemoveIndex; if (nextOffset < 0) { // Removing the last entry in a chain of at least two entries. beforeLastIndex = previousEntryIndex == Integer.MAX_VALUE ? findPreviousInChain(entryToRemoveIndex, next) : previousEntryIndex; // Unlink the last entry which replaces the removed entry. next[beforeLastIndex] = (byte) (next[beforeLastIndex] > 0 ? END_OF_CHAIN : -END_OF_CHAIN); } } else { int beforeLastNextOffset = next[beforeLastIndex]; lastIndex = addOffset(beforeLastIndex, Math.abs(beforeLastNextOffset), next.length); assert entryToRemoveIndex != lastIndex; // The entry to remove is before the last of the chain. Replace it by the last one. keys[entryToRemoveIndex] = keys[lastIndex]; values[entryToRemoveIndex] = values[lastIndex]; // Unlink the last entry which replaces the removed entry. next[beforeLastIndex] = (byte) (beforeLastNextOffset > 0 ? END_OF_CHAIN : -END_OF_CHAIN); } // Free the last entry of the chain. keys[lastIndex] = 0; values[lastIndex] = noValue(); next[lastIndex] = 0; size--; return previousValue; } /** * Appends a new entry at the tail of an entry chain. * * @param lastEntryIndex The index of the last entry in the chain. * @return true if the new entry is added successfully; false if there is no free bucket * in the range (so this map needs to be enlarged to make room). */ private boolean appendTailOfChain(int lastEntryIndex, int key, VType value) { return appendTailOfChain(lastEntryIndex, key, value, ExcludedIndexes.NONE, 0); } /** * Appends a new entry at the tail of an entry chain. * * @param lastEntryIndex The index of the last entry in the chain. * @param excludedIndexes Indexes to exclude from the search. * @param recursiveCallLevel Keeps track of the recursive call level (starts at 0). * @return true if the new entry is added successfully; false if there is no free bucket * in the range (so this map needs to be enlarged to make room). */ private boolean appendTailOfChain(int lastEntryIndex, int key, VType value, ExcludedIndexes excludedIndexes, int recursiveCallLevel) { // Find the next free bucket by linear probing. final int capacity = next.length; int searchFromIndex = addOffset(lastEntryIndex, 1, capacity); int freeIndex = searchFreeBucket(searchFromIndex, maxOffset(capacity), -1, next); if (freeIndex == -1) { freeIndex = searchAndMoveBucket(searchFromIndex, maxOffset(capacity), excludedIndexes, recursiveCallLevel); if (freeIndex == -1) return false; } keys[freeIndex] = key; values[freeIndex] = value; next[freeIndex] = -END_OF_CHAIN; int nextOffset = getOffsetBetweenIndexes(lastEntryIndex, freeIndex, next.length); next[lastEntryIndex] = (byte) (next[lastEntryIndex] > 0 ? nextOffset : -nextOffset); // Keep the offset sign. return true; } /** * Searches a movable tail-of-chain bucket by linear probing to the right. If a movable tail-of-chain is found, this * method attempts to move it. * * @param fromIndex The index of the entry to start searching from. * @param range The maximum number of buckets to search, starting from index (included), up to index + * range (excluded). * @param excludedIndexes Indexes to exclude from the search. * @param recursiveCallLevel Keeps track of the recursive call level (starts at 0). * @return The index of the freed bucket; or -1 if no bucket could be freed within the range. 
*/ private int searchAndMoveBucket(int fromIndex, int range, ExcludedIndexes excludedIndexes, int recursiveCallLevel) { assert checkIndex(fromIndex, next.length); assert range >= 0 && range <= maxOffset(next.length) : "range=" + range + ", maxOffset=" + maxOffset(next.length); int remainingAttempts = RECURSIVE_MOVE_ATTEMPTS[recursiveCallLevel]; if (remainingAttempts <= 0 || range <= 0) { return -1; } final byte[] next = this.next; final int capacity = next.length; int nextRecursiveCallLevel = recursiveCallLevel + 1; for (int index = fromIndex + range - 1; index >= fromIndex; index--) { int rolledIndex = index & (capacity - 1); if (excludedIndexes.isIndexExcluded(rolledIndex)) { continue; } int nextOffset = next[rolledIndex]; if (nextOffset < 0) { // Attempt to move the tail of chain. if (moveTailOfChain(rolledIndex, nextOffset, excludedIndexes, nextRecursiveCallLevel)) return rolledIndex; if (--remainingAttempts <= 0) return -1; } } return -1; } /** * Puts a new entry that is guaranteed not to be already contained by this map.

This method does not modify this map's {@link #size}. It may enlarge this map if it needs room to put the entry.

* * @param hashIndex The hash index where to put the entry (= {@link #hashMod}(key)). * @param nextOffset The current value of {@link #next}[hashIndex]. */ private void putNewEntry(int hashIndex, int nextOffset, int key, VType value) { assert hashIndex == hashMod(key) : "hashIndex=" + hashIndex + ", hashReduce(key)=" + hashMod(key); assert checkIndex(hashIndex, next.length); assert Math.abs(nextOffset) <= END_OF_CHAIN : "nextOffset=" + nextOffset; assert nextOffset == next[hashIndex] : "nextOffset=" + nextOffset + ", next[hashIndex]=" + next[hashIndex]; if (nextOffset > 0) { // The bucket contains a head-of-chain entry. // Append the new entry at the chain tail, after the last entry of the chain. If there is no free bucket in // the range, enlarge this map and put the new entry. if (!appendTailOfChain(findLastOfChain(hashIndex, nextOffset, false, next), key, value)) { enlargeAndPutNewEntry(key, value); } } else { if (nextOffset < 0) { // Bucket at hash index contains a movable tail-of-chain entry. Move it to free the bucket. if (!moveTailOfChain(hashIndex, nextOffset, ExcludedIndexes.NONE, 0)) { // No free bucket in the range. Enlarge the map and put again. enlargeAndPutNewEntry(key, value); return; } } // Bucket at hash index is free. Add the new head-of-chain entry. keys[hashIndex] = key; values[hashIndex] = value; next[hashIndex] = END_OF_CHAIN; } } /** * Moves a tail-of-chain entry to another free bucket. * * @param tailIndex The index of the tail-of-chain entry. * @param nextOffset The value of {@link #next}[tailIndex]. It is always < 0. * @param excludedIndexes Indexes to exclude from the search. * @param recursiveCallLevel Keeps track of the recursive call level (starts at 0). * @return Whether the entry has been successfully moved; or if it could not because there is no free bucket in the * range. */ private boolean moveTailOfChain(int tailIndex, int nextOffset, ExcludedIndexes excludedIndexes, int recursiveCallLevel) { assert checkIndex(tailIndex, next.length); assert nextOffset < 0 && nextOffset >= -END_OF_CHAIN : "nextOffset=" + nextOffset; assert nextOffset == next[tailIndex] : "nextOffset=" + nextOffset + ", next[tailIndex]=" + next[tailIndex]; // Find the next free bucket by linear probing. // It must be within a range of maxOffset of the previous entry in the chain, // and not beyond the next entry in the chain. final byte[] next = this.next; final int capacity = next.length; final int maxOffset = maxOffset(capacity); int previousIndex = findPreviousInChain(tailIndex, next); int absPreviousOffset = Math.abs(next[previousIndex]); int nextIndex = nextOffset == -END_OF_CHAIN ? -1 : addOffset(tailIndex, -nextOffset, capacity); int offsetFromPreviousToNext = absPreviousOffset - nextOffset; int searchFromIndex; int searchRange; boolean nextIndexWithinRange; // Compare [the offset from previous entry to next entry] to [maxOffset]. if (offsetFromPreviousToNext <= maxOffset) { // The next entry in the chain is inside the maximum offset range. // Prepare to search for a free bucket starting from the tail-of-chain entry, up to the next entry in the chain. searchFromIndex = addOffset(previousIndex, 1, capacity); searchRange = offsetFromPreviousToNext - 1; nextIndexWithinRange = true; } else { // The next entry is not inside the maximum range. It is always the case if nextOffset is -END_OF_CHAIN. // Prepare to search for a free bucket starting from the tail-of-chain entry, up to maxOffset from the // previous entry. 
if (nextIndex == -1) { searchFromIndex = addOffset(previousIndex, 1, capacity); searchRange = maxOffset; } else { searchFromIndex = addOffset(nextIndex, -maxOffset, capacity); int searchToIndex = addOffset(previousIndex, maxOffset, capacity); searchRange = getOffsetBetweenIndexes(searchFromIndex, searchToIndex, capacity) + 1; } nextIndexWithinRange = false; } int freeIndex = searchFreeBucket(searchFromIndex, searchRange, tailIndex, next); if (freeIndex == -1) { // No free bucket in the range. if (nextIndexWithinRange && appendTailOfChain( findLastOfChain(nextIndex, next[nextIndex], false, next), keys[tailIndex], (VType) values[tailIndex], excludedIndexes, recursiveCallLevel)) { // The entry to move has been appended to the tail of the chain. // Complete the move by linking the previous entry to the next entry (which is within range). int previousOffset = getOffsetBetweenIndexes(previousIndex, nextIndex, capacity); next[previousIndex] = (byte) (next[previousIndex] > 0 ? previousOffset : -previousOffset); // Keep the offset sign. return true; } else { ExcludedIndexes recursiveExcludedIndexes = excludedIndexes.union(ExcludedIndexes.fromChain(previousIndex, next)); if ((freeIndex = searchAndMoveBucket(searchFromIndex, searchRange, recursiveExcludedIndexes, recursiveCallLevel)) == -1) { // No free bucket after the tail of the chain, and no movable entry. No bucket available around. // The move fails (and this map will be enlarged by the calling method). return false; } } } // Move the entry to the free index. // No need to set keys[tailIndex] and values[tailIndex] to null here because they will be set when this method returns, // or the map will be enlarged and rehashed. keys[freeIndex] = keys[tailIndex]; values[freeIndex] = values[tailIndex]; next[freeIndex] = (byte) (nextOffset == -END_OF_CHAIN ? nextOffset : -getOffsetBetweenIndexes(freeIndex, nextIndex, capacity)); int previousOffset = getOffsetBetweenIndexes(previousIndex, freeIndex, capacity); next[previousIndex] = (byte) (next[previousIndex] > 0 ? previousOffset : -previousOffset); // Keep the offset sign. assert next[freeIndex] < 0 : "freeIndex=" + freeIndex + ", next[freeIndex]=" + next[freeIndex]; return true; } /** * Searches an entry in a chain. * * @param key The searched entry key. * @param index The head-of-chain index. * @param nextOffset next[index]. It must be > 0. * @return The matched entry index; or 2's complement ~index if not found, index of the last entry in the chain. */ private int searchInChain(int key, int index, int nextOffset) { assert checkIndex(index, next.length); assert nextOffset > 0 && nextOffset <= END_OF_CHAIN : "nextOffset=" + nextOffset; assert nextOffset == next[index] : "nextOffset=" + nextOffset + ", next[index]=" + next[index]; // There is at least one entry at this bucket. Check the first head-of-chain. if (((key) == (keys[index]))) { // The first head-of-chain entry matches the key. Return its index. return index; } // Follow the entry chain for this bucket. final int capacity = next.length; while (nextOffset != END_OF_CHAIN) { index = addOffset(index, nextOffset, capacity); // Jump forward. if (((key) == (keys[index]))) { // An entry in the chain matches the key. Return its index. return index; } nextOffset = -next[index]; // Next offsets are negative for tail-of-chain entries. assert nextOffset > 0 : "nextOffset=" + nextOffset; } // No entry matches the key. Return the last entry index as 2's complement. 
return ~index; } /** * Searches an entry in a chain and returns its previous entry in the chain. * * @param key The searched entry key. * @param index The head-of-chain index. * @param nextOffset next[index]. It must be > 0. * @return The index of the entry preceding the matched entry; or {@link Integer#MAX_VALUE} if the head-of-chain * matches; or 2's complement ~index if not found, index of the last entry in the chain. */ private int searchInChainReturnPrevious(int key, int index, int nextOffset) { assert checkIndex(index, next.length); assert nextOffset > 0 && nextOffset <= END_OF_CHAIN : "nextOffset=" + nextOffset; assert nextOffset == next[index] : "nextOffset=" + nextOffset + ", next[index]=" + next[index]; // There is at least one entry at this bucket. Check the first head-of-chain. if (((key) == (keys[index]))) { // The first head-of-chain entry matches the key. Return Integer.MAX_VALUE as there is no previous entry. return Integer.MAX_VALUE; } // Follow the entry chain for this bucket. final int capacity = next.length; while (nextOffset != END_OF_CHAIN) { int previousIndex = index; index = addOffset(index, nextOffset, capacity); // Jump forward. if (((key) == (keys[index]))) { // An entry in the chain matches the key. Return the previous entry index. return previousIndex; } nextOffset = -next[index]; // Next offsets are negative for tail-of-chain entries. assert nextOffset > 0 : "nextOffset=" + nextOffset; } // No entry matches the key. Return the last entry index as 2's complement. return ~index; } /** * Provides the next iteration seed used to build the iteration starting slot and offset increment. * This method does not need to be synchronized, what matters is that each thread gets a sequence of varying seeds. */ protected int nextIterationSeed() { return iterationSeed = BitMixer.mixPhi(iterationSeed); } /** * A view of the keys inside this map. */ public final class KeysContainer extends AbstractIntCollection implements IntLookupContainer { @Override public boolean contains(int e) { return IntObjectWormMap.this.containsKey(e); } @Override public T forEach(final T procedure) { IntObjectWormMap.this.forEach( (IntObjectProcedure) (key, value) -> procedure.apply(key)); return procedure; } @Override public T forEach(final T predicate) { IntObjectWormMap.this.forEach( (IntObjectPredicate) (key, value) -> predicate.apply(key)); return predicate; } @Override public boolean isEmpty() { return IntObjectWormMap.this.isEmpty(); } @Override public Iterator iterator() { return new KeysIterator(); } @Override public int size() { return IntObjectWormMap.this.size(); } @Override public void clear() { IntObjectWormMap.this.clear(); } @Override public void release() { IntObjectWormMap.this.release(); } @Override public int removeAll(IntPredicate predicate) { return IntObjectWormMap.this.removeAll(predicate); } @Override public int removeAll(final int e) { return IntObjectWormMap.this.remove(e) == noValue() ? 0 : 1; } } /** * An iterator over the set of assigned keys. 
*/ private class KeysIterator extends AbstractIterator { private final IntCursor cursor; private final int increment; private int index; private int slot; public KeysIterator() { cursor = new IntCursor(); int seed = nextIterationSeed(); increment = iterationIncrement(seed); slot = seed & (next.length - 1); } @Override protected IntCursor fetch() { final int mask = next.length - 1; while (index <= mask) { index++; slot = (slot + increment) & mask; if (next[slot] != 0) { cursor.index = slot; cursor.value = keys[slot]; return cursor; } } return done(); } } /** * A view over the set of values of this map. */ private class ValuesContainer extends AbstractObjectCollection { @Override public int size() { return IntObjectWormMap.this.size(); } @Override public boolean isEmpty() { return IntObjectWormMap.this.isEmpty(); } @Override public boolean contains(VType value) { for (IntObjectCursor c : IntObjectWormMap.this) { if (java.util.Objects.equals(c.value, value)) { return true; } } return false; } @Override public > T forEach(T procedure) { for (IntObjectCursor c : IntObjectWormMap.this) { procedure.apply(c.value); } return procedure; } @Override public > T forEach(T predicate) { for (IntObjectCursor c : IntObjectWormMap.this) { if (!predicate.apply(c.value)) { break; } } return predicate; } @Override public Iterator> iterator() { return new ValuesIterator(); } @Override public int removeAll(final VType e) { return IntObjectWormMap.this.removeAll((key, value) -> java.util.Objects.equals(value, e)); } @Override public int removeAll(final ObjectPredicate predicate) { return IntObjectWormMap.this.removeAll((key, value) -> predicate.apply(value)); } @Override public void clear() { IntObjectWormMap.this.clear(); } @Override public void release() { IntObjectWormMap.this.release(); } } /** * An iterator over the set of assigned values. */ private class ValuesIterator extends AbstractIterator> { private final ObjectCursor cursor; private final int increment; private int index; private int slot; public ValuesIterator() { cursor = new ObjectCursor(); int seed = nextIterationSeed(); increment = iterationIncrement(seed); slot = seed & (next.length - 1); } @Override protected ObjectCursor fetch() { final int mask = next.length - 1; while (index <= mask) { index++; slot = (slot + increment) & mask; if (next[slot] != 0) { cursor.index = slot; cursor.value = (VType) values[slot]; return cursor; } } return done(); } } /** * An iterator implementation for {@link #iterator}. */ private class EntryIterator extends AbstractIterator> { private final IntObjectCursor cursor; private final int increment; private int index; private int slot; public EntryIterator() { cursor = new IntObjectCursor(); int seed = nextIterationSeed(); increment = iterationIncrement(seed); slot = seed & (next.length - 1); } @Override protected IntObjectCursor fetch() { final int mask = next.length - 1; while (index <= mask) { index++; slot = (slot + increment) & mask; if (next[slot] != 0) { cursor.index = slot; cursor.key = keys[slot]; cursor.value = (VType) values[slot]; return cursor; } } return done(); } } }
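As a complement to the listing, the following sketch shows the index-based access pattern exposed by the indexOf / indexExists / indexGet / indexInsert methods above, which lets a get-or-compute lookup hash the key only once. The class name WormMapCache and the computeValue helper are hypothetical illustrations, not part of HPPC.

import com.carrotsearch.hppc.IntObjectWormMap;

// Hypothetical helper class built on the index* API shown in the listing above.
class WormMapCache {
  private final IntObjectWormMap<String> cache = new IntObjectWormMap<>();

  String getOrCompute(int key) {
    int index = cache.indexOf(key);        // >= 0 if the key is present, a negative (~slot) value otherwise
    if (cache.indexExists(index)) {
      return cache.indexGet(index);        // read the existing value without re-hashing the key
    }
    String value = computeValue(key);      // placeholder for an expensive computation
    cache.indexInsert(index, key, value);  // insert at the negative index returned by indexOf
    return value;
  }

  private String computeValue(int key) {
    return "value-" + key;
  }
}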



