/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.schema;

import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import org.apache.phoenix.parse.PFunction;
import org.apache.phoenix.parse.PSchema;
import org.apache.phoenix.util.TimeKeeper;
import com.google.common.collect.Maps;
import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.primitives.Longs;
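
/**
 * Client-side cache of {@link PTable} metadata keyed by {@link PTableKey}, with
 * eviction of the least recently used entries once the estimated byte size
 * exceeds {@code maxByteSize}. The backing maps are plain HashMaps and are not
 * thread safe; callers clone the cache and prune the copy instead of mutating
 * it concurrently.
 *
 * <p>A minimal usage sketch (illustrative only; {@code TimeKeeper.SYSTEM} is
 * the system-clock implementation in Phoenix's {@code TimeKeeper}, and the
 * {@code tableRefFactory} argument is assumed to be constructed elsewhere):
 * <pre>
 * PMetaDataCache cache = new PMetaDataCache(100, 1024 * 1024,
 *         TimeKeeper.SYSTEM, tableRefFactory);
 * long overage = cache.getCurrentSize() - cache.getMaxSize();
 * if (overage > 0) {
 *     cache = cache.cloneMinusOverage(overage);
 * }
 * </pre>
 */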
class PMetaDataCache implements Cloneable {
private static final int MIN_REMOVAL_SIZE = 3;
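    // Orders PTableRefs from least to most recently accessed; cloneMinusOverage()
    // relies on this to keep only the least recently used entries in its removal queue.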
    private static final Comparator<PTableRef> COMPARATOR = new Comparator<PTableRef>() {
        @Override
        public int compare(PTableRef tableRef1, PTableRef tableRef2) {
            return Longs.compare(tableRef1.getLastAccessTime(), tableRef2.getLastAccessTime());
        }
    };
    private static final MinMaxPriorityQueue.Builder<PTableRef> BUILDER = MinMaxPriorityQueue.orderedBy(COMPARATOR);
private long currentByteSize;
private final long maxByteSize;
private final int expectedCapacity;
private final TimeKeeper timeKeeper;
private final PTableRefFactory tableRefFactory;
    private final Map<PTableKey, PTableRef> tables;
    final Map<PTableKey, PFunction> functions;
    final Map<PTableKey, PSchema> schemas;
    private static Map<PTableKey, PTableRef> newMap(int expectedCapacity) {
// Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
// safely across multiple threads (as the underlying collection is not thread safe).
// Instead, we track access time and prune it based on the copy we've made.
return Maps.newHashMapWithExpectedSize(expectedCapacity);
}
    private static Map<PTableKey, PFunction> newFunctionMap(int expectedCapacity) {
// Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
// safely across multiple threads (as the underlying collection is not thread safe).
// Instead, we track access time and prune it based on the copy we've made.
return Maps.newHashMapWithExpectedSize(expectedCapacity);
}
    private static Map<PTableKey, PSchema> newSchemaMap(int expectedCapacity) {
// Use regular HashMap, as we cannot use a LinkedHashMap that orders by access time
// safely across multiple threads (as the underlying collection is not thread safe).
// Instead, we track access time and prune it based on the copy we've made.
return Maps.newHashMapWithExpectedSize(expectedCapacity);
}
    private Map<PTableKey, PTableRef> cloneMap(Map<PTableKey, PTableRef> tables, int expectedCapacity) {
        Map<PTableKey, PTableRef> newTables = newMap(Math.max(tables.size(), expectedCapacity));
// Copy value so that access time isn't changing anymore
for (PTableRef tableAccess : tables.values()) {
newTables.put(tableAccess.getTable().getKey(), tableRefFactory.makePTableRef(tableAccess));
}
return newTables;
}
    private static Map<PTableKey, PSchema> cloneSchemaMap(Map<PTableKey, PSchema> schemas, int expectedCapacity) {
        Map<PTableKey, PSchema> newSchemas = newSchemaMap(Math.max(schemas.size(), expectedCapacity));
// Copy value so that access time isn't changing anymore
for (PSchema schema : schemas.values()) {
newSchemas.put(schema.getSchemaKey(), new PSchema(schema));
}
return newSchemas;
}
    private static Map<PTableKey, PFunction> cloneFunctionsMap(Map<PTableKey, PFunction> functions, int expectedCapacity) {
        Map<PTableKey, PFunction> newFunctions = newFunctionMap(Math.max(functions.size(), expectedCapacity));
for (PFunction functionAccess : functions.values()) {
newFunctions.put(functionAccess.getKey(), new PFunction(functionAccess));
}
return newFunctions;
}
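
    /**
     * Copy constructor: deep-copies the table, function, and schema maps so that
     * the clone's access times are a snapshot, no longer changing with the original.
     */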
PMetaDataCache(PMetaDataCache toClone) {
this.tableRefFactory = toClone.tableRefFactory;
this.timeKeeper = toClone.timeKeeper;
this.maxByteSize = toClone.maxByteSize;
this.currentByteSize = toClone.currentByteSize;
this.expectedCapacity = toClone.expectedCapacity;
this.tables = cloneMap(toClone.tables, expectedCapacity);
this.functions = cloneFunctionsMap(toClone.functions, expectedCapacity);
this.schemas = cloneSchemaMap(toClone.schemas, expectedCapacity);
}
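
    /**
     * @param initialCapacity expected number of cached tables
     * @param maxByteSize soft limit on the summed estimated byte size of cached tables
     * @param timeKeeper clock used to stamp access and creation times
     * @param tableRefFactory factory used to wrap tables in {@link PTableRef}s
     */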
public PMetaDataCache(int initialCapacity, long maxByteSize, TimeKeeper timeKeeper, PTableRefFactory tableRefFactory) {
this.currentByteSize = 0;
this.maxByteSize = maxByteSize;
this.expectedCapacity = initialCapacity;
this.tables = newMap(this.expectedCapacity);
this.functions = newFunctionMap(this.expectedCapacity);
this.timeKeeper = timeKeeper;
this.schemas = newSchemaMap(this.expectedCapacity);
this.tableRefFactory = tableRefFactory;
}
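
    /**
     * Looks up a table and stamps its last access time, so that eviction in
     * {@link #cloneMinusOverage(long)} favors keeping recently used tables.
     * @return the cached {@link PTableRef}, or {@code null} if not present
     */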
public PTableRef get(PTableKey key) {
PTableRef tableAccess = this.tables.get(key);
if (tableAccess == null) {
return null;
}
tableAccess.setLastAccessTime(timeKeeper.getCurrentTime());
return tableAccess;
}
@Override
public PMetaDataCache clone() {
return new PMetaDataCache(this);
}
/**
* Used when the cache is growing past its max size to clone in a single pass.
     * Removes the least recently used tables to bring the cache size at least
     * the overage amount below its max size.
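     * <p>
     * For example (illustrative numbers only): with currentByteSize = 1000,
     * maxByteSize = 800, and 10 cached tables, the average entry is 100 bytes, so
     * nToRemove = max(3, ceil((1000 - 800) / 100) + 1) = 3 least recently used
     * tables become removal candidates.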
*/
public PMetaDataCache cloneMinusOverage(long overage) {
assert(overage > 0);
        int nToRemove = Math.max(MIN_REMOVAL_SIZE,
                (int) Math.ceil((currentByteSize - maxByteSize)
                        / ((double) currentByteSize / size())) + 1);
        MinMaxPriorityQueue<PTableRef> toRemove = BUILDER.expectedSize(nToRemove).create();
PMetaDataCache newCache = new PMetaDataCache(this.size(), this.maxByteSize, this.timeKeeper, this.tableRefFactory);
long toRemoveBytes = 0;
        // Add entries to the new cache, but track which references to remove
        // afterwards, to bring the cache at least the overage amount below its max size.
for (PTableRef tableRef : this.tables.values()) {
newCache.put(tableRef.getTable().getKey(), tableRefFactory.makePTableRef(tableRef));
toRemove.add(tableRef);
toRemoveBytes += tableRef.getEstimatedSize();
while (toRemoveBytes - toRemove.peekLast().getEstimatedSize() >= overage) {
PTableRef removedRef = toRemove.removeLast();
toRemoveBytes -= removedRef.getEstimatedSize();
}
}
for (PTableRef toRemoveRef : toRemove) {
newCache.remove(toRemoveRef.getTable().getKey());
}
return newCache;
}
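
    /**
     * Adds or replaces a table reference, adjusting currentByteSize by the
     * difference between the new and old estimated sizes.
     * @return the previously cached {@link PTable}, or {@code null} if none
     */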
PTable put(PTableKey key, PTableRef ref) {
currentByteSize += ref.getEstimatedSize();
PTableRef oldTableAccess = this.tables.put(key, ref);
PTable oldTable = null;
if (oldTableAccess != null) {
currentByteSize -= oldTableAccess.getEstimatedSize();
oldTable = oldTableAccess.getTable();
}
return oldTable;
}
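
    /**
     * Returns the elapsed time since the given reference was created, in the
     * units of the configured {@link TimeKeeper} (typically milliseconds).
     */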
public long getAge(PTableRef ref) {
return timeKeeper.getCurrentTime() - ref.getCreateTime();
}
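
    /**
     * Removes the table for the given key, crediting its estimated size back
     * against currentByteSize.
     * @return the removed {@link PTable}, or {@code null} if it was not cached
     */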
public PTable remove(PTableKey key) {
PTableRef value = this.tables.remove(key);
if (value == null) {
return null;
}
currentByteSize -= value.getEstimatedSize();
return value.getTable();
}
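
    /**
     * Returns a read-only iterator over the cached {@link PTable}s; the wrapping
     * {@link PTableRef}s and their access times are not exposed, and
     * {@code remove()} is unsupported.
     */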
    public Iterator<PTable> iterator() {
        final Iterator<PTableRef> iterator = this.tables.values().iterator();
        return new Iterator<PTable>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public PTable next() {
return iterator.next().getTable();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
public int size() {
return this.tables.size();
}
public long getCurrentSize() {
return this.currentByteSize;
}
public long getMaxSize() {
return this.maxByteSize;
}
}