/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import io.netty.util.internal.LongCounter;
import io.netty.util.internal.PlatformDependent;
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.PriorityQueue;
import java.util.concurrent.locks.ReentrantLock;
/**
* Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
*
* Notation: The following terms are important to understand the code
* > page - a page is the smallest unit of memory in a chunk that can be allocated
* > run - a run is a collection of contiguous pages
* > chunk - a chunk is a collection of runs
* > in this code, chunkSize = maxPages * pageSize
*
* To begin, we allocate a byte array of size = chunkSize.
* Whenever a ByteBuf of a given size needs to be created, we search for the first position
* in the byte array that has enough empty space to accommodate the requested size and
* return a (long) handle that encodes this offset information. (The memory segment is then
* marked as reserved, so it is always used by exactly one ByteBuf and no more.)
*
* For simplicity all sizes are normalized using the {@link PoolArena#size2SizeIdx(int)} method.
* This ensures that when we request memory segments of size > pageSize, the normalizedCapacity
* equals the next nearest size class in {@link SizeClasses}.
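* For example (illustrative only; the exact classes depend on the {@link SizeClasses}
* configuration), with an 8 KiB page a request for 9000 bytes is rounded up to the next
* size class above 8 KiB rather than all the way up to the next power of two.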
*
*
* A chunk has the following layout:
*
* /-----------------\
* | run |
* | |
* | |
* |-----------------|
* | run |
* | |
* |-----------------|
* | unallocated |
* | (freed) |
* | |
* |-----------------|
* | subpage |
* |-----------------|
* | unallocated |
* | (freed) |
* | ... |
* | ... |
* | ... |
* | |
* | |
* | |
* \-----------------/
*
*
* handle:
* -------
* a handle is a long number; the bit layout of a run handle looks like:
*
* oooooooo ooooooos ssssssss ssssssue bbbbbbbb bbbbbbbb bbbbbbbb bbbbbbbb
*
* o: runOffset (page offset in the chunk), 15bit
* s: size (number of pages) of this run, 15bit
* u: isUsed?, 1bit
* e: isSubpage?, 1bit
* b: bitmapIdx of subpage, zero if it's not subpage, 32bit
*
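* A worked example (a sketch; values chosen purely for illustration): a used 4-page run
* at page offset 10 is encoded as
* <pre>
*   long handle = (long) 10 << RUN_OFFSET_SHIFT  // runOffset
*               | (long)  4 << SIZE_SHIFT        // pages
*               | 1L << IS_USED_SHIFT;           // isUsed
*   // runOffset(handle) == 10, runPages(handle) == 4,
*   // isUsed(handle) == true, isSubpage(handle) == false, bitmapIdx(handle) == 0
* </pre>
*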
* runsAvailMap:
* ------
* a map which manages all runs (used and not in use).
* For each run, the offsets of its first and last pages are stored in runsAvailMap.
* key: runOffset
* value: handle
*
* runsAvail:
* ----------
* an array of {@link PriorityQueue}.
* Each queue manages runs of the same size range.
* Runs are sorted by offset, so that we always allocate the run with the smallest offset.
*
*
* Algorithm:
* ----------
*
* As we allocate runs, we update the values stored in runsAvailMap and runsAvail so that
* these data structures stay consistent.
*
* Initialization -
* In the beginning we store the initial run, which covers the whole chunk.
* The initial run:
* runOffset = 0
* size = chunkSize
* isUsed = no
* isSubpage = no
* bitmapIdx = 0
*
*
* Algorithm: [allocateRun(size)]
* ----------
* 1) find the first available run in runsAvail according to size
* 2) if the run has more pages than requested, split it and save the tail run
* for later use
*
* Algorithm: [allocateSubpage(size)]
* ----------
* 1) find a subpage that is not full according to size.
* if one already exists just return it, otherwise allocate a new PoolSubpage and call init(),
* note that this subpage object is added to the subpagesPool in the PoolArena when we init() it
* 2) call subpage.allocate()
*
* Algorithm: [free(handle, length, nioBuffer)]
* ----------
* 1) if it is a subpage, return the slab back to this subpage
* 2) if the subpage is unused or the handle is a run, then start freeing this run
* 3) merge adjacent available runs
* 4) save the merged run
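*
* For example (a sketch): freeing a 2-page run at page offset 3 while free runs cover
* pages [1..2] and [5..6] collapses all three into a single free 6-page run at offset 1.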
*
*/
final class PoolChunk<T> implements PoolChunkMetric {
private static final int SIZE_BIT_LENGTH = 15;
private static final int INUSED_BIT_LENGTH = 1;
private static final int SUBPAGE_BIT_LENGTH = 1;
private static final int BITMAP_IDX_BIT_LENGTH = 32;
static final int IS_SUBPAGE_SHIFT = BITMAP_IDX_BIT_LENGTH;
static final int IS_USED_SHIFT = SUBPAGE_BIT_LENGTH + IS_SUBPAGE_SHIFT;
static final int SIZE_SHIFT = INUSED_BIT_LENGTH + IS_USED_SHIFT;
static final int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT;
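// Resulting bit positions within a handle (derived from the constants above):
//   bits  0..31 : bitmapIdx  (BITMAP_IDX_BIT_LENGTH = 32)
//   bit      32 : isSubpage  (IS_SUBPAGE_SHIFT = 32)
//   bit      33 : isUsed     (IS_USED_SHIFT = 33)
//   bits 34..48 : pages      (SIZE_SHIFT = 34)
//   bits 49..63 : runOffset  (RUN_OFFSET_SHIFT = 49)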
final PoolArena<T> arena;
final Object base;
final T memory;
final boolean unpooled;
/**
* store the first page and last page of each avail run
*/
private final LongLongHashMap runsAvailMap;
/**
* manage all avail runs
*/
private final IntPriorityQueue[] runsAvail;
private final ReentrantLock runsAvailLock;
/**
* manage all subpages in this chunk
*/
private final PoolSubpage<T>[] subpages;
/**
* Accounting of pinned memory – memory that is currently in use by ByteBuf instances.
*/
private final LongCounter pinnedBytes = PlatformDependent.newLongCounter();
private final int pageSize;
private final int pageShifts;
private final int chunkSize;
// Used as a cache for ByteBuffers created from the memory. These are just duplicates and so are only a container
// around the memory itself. They are often needed for operations within the Pooled*ByteBuf and so
// may produce extra GC, which can be greatly reduced by caching the duplicates.
//
// This may be null if the PoolChunk is unpooled as pooling the ByteBuffer instances does not make any sense here.
private final Deque<ByteBuffer> cachedNioBuffers;
int freeBytes;
PoolChunkList<T> parent;
PoolChunk<T> prev;
PoolChunk<T> next;
// TODO: Test if adding padding helps under contention
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
@SuppressWarnings("unchecked")
PoolChunk(PoolArena<T> arena, Object base, T memory, int pageSize, int pageShifts, int chunkSize, int maxPageIdx) {
unpooled = false;
this.arena = arena;
this.base = base;
this.memory = memory;
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.chunkSize = chunkSize;
freeBytes = chunkSize;
runsAvail = newRunsAvailqueueArray(maxPageIdx);
runsAvailLock = new ReentrantLock();
runsAvailMap = new LongLongHashMap(-1);
subpages = new PoolSubpage[chunkSize >> pageShifts];
//insert initial run, offset = 0, pages = chunkSize / pageSize
int pages = chunkSize >> pageShifts;
long initHandle = (long) pages << SIZE_SHIFT;
insertAvailRun(0, pages, initHandle);
cachedNioBuffers = new ArrayDeque<ByteBuffer>(8);
}
/** Creates a special chunk that is not pooled. */
PoolChunk(PoolArena<T> arena, Object base, T memory, int size) {
unpooled = true;
this.arena = arena;
this.base = base;
this.memory = memory;
pageSize = 0;
pageShifts = 0;
runsAvailMap = null;
runsAvail = null;
runsAvailLock = null;
subpages = null;
chunkSize = size;
cachedNioBuffers = null;
}
private static IntPriorityQueue[] newRunsAvailqueueArray(int size) {
IntPriorityQueue[] queueArray = new IntPriorityQueue[size];
for (int i = 0; i < queueArray.length; i++) {
queueArray[i] = new IntPriorityQueue();
}
return queueArray;
}
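/**
 * Inserts a free run into runsAvail (keyed by its page-count index) and records both its
 * first and last page offsets in runsAvailMap, so that collapsePast and collapseNext can
 * later find an adjacent free run from either end with a single map lookup.
 */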
private void insertAvailRun(int runOffset, int pages, long handle) {
int pageIdxFloor = arena.pages2pageIdxFloor(pages);
IntPriorityQueue queue = runsAvail[pageIdxFloor];
assert isRun(handle);
queue.offer((int) (handle >> BITMAP_IDX_BIT_LENGTH));
//insert first page of run
insertAvailRun0(runOffset, handle);
if (pages > 1) {
//insert last page of run
insertAvailRun0(lastPage(runOffset, pages), handle);
}
}
private void insertAvailRun0(int runOffset, long handle) {
long pre = runsAvailMap.put(runOffset, handle);
assert pre == -1;
}
private void removeAvailRun(long handle) {
int pageIdxFloor = arena.pages2pageIdxFloor(runPages(handle));
runsAvail[pageIdxFloor].remove((int) (handle >> BITMAP_IDX_BIT_LENGTH));
removeAvailRun0(handle);
}
private void removeAvailRun0(long handle) {
int runOffset = runOffset(handle);
int pages = runPages(handle);
//remove first page of run
runsAvailMap.remove(runOffset);
if (pages > 1) {
//remove last page of run
runsAvailMap.remove(lastPage(runOffset, pages));
}
}
private static int lastPage(int runOffset, int pages) {
return runOffset + pages - 1;
}
private long getAvailRunByOffset(int runOffset) {
return runsAvailMap.get(runOffset);
}
@Override
public int usage() {
final int freeBytes;
if (this.unpooled) {
freeBytes = this.freeBytes;
} else {
runsAvailLock.lock();
try {
freeBytes = this.freeBytes;
} finally {
runsAvailLock.unlock();
}
}
return usage(freeBytes);
}
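/**
 * Converts a free byte count into a usage percentage. Note the clamping at both ends:
 * zero free bytes reports exactly 100, while a chunk that still has any free space at all
 * reports at most 99, so a nearly-full chunk is never mistaken for a completely full one.
 */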
private int usage(int freeBytes) {
if (freeBytes == 0) {
return 100;
}
int freePercentage = (int) (freeBytes * 100L / chunkSize);
if (freePercentage == 0) {
return 99;
}
return 100 - freePercentage;
}
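/**
 * Allocates memory for a buffer out of this chunk: small sizes are served from a
 * {@link PoolSubpage}, normal sizes from a fresh run of pages.
 *
 * @return {@code true} on success, or {@code false} if this chunk has no suitable free
 *         run left, in which case the caller can fall back to another chunk
 */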
boolean allocate(PooledByteBuf<T> buf, int reqCapacity, int sizeIdx, PoolThreadCache cache) {
final long handle;
if (sizeIdx <= arena.smallMaxSizeIdx) {
final PoolSubpage<T> nextSub;
// small
// Obtain the head of the PoolSubpage pool that is owned by the PoolArena and synchronize on it.
// This is needed as we may add it back and so alter the linked-list structure.
PoolSubpage<T> head = arena.findSubpagePoolHead(sizeIdx);
head.lock();
try {
nextSub = head.next;
if (nextSub != head) {
assert nextSub.doNotDestroy && nextSub.elemSize == arena.sizeIdx2size(sizeIdx) : "doNotDestroy=" +
nextSub.doNotDestroy + ", elemSize=" + nextSub.elemSize + ", sizeIdx=" + sizeIdx;
handle = nextSub.allocate();
assert handle >= 0;
assert isSubpage(handle);
nextSub.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache);
return true;
}
handle = allocateSubpage(sizeIdx, head);
if (handle < 0) {
return false;
}
assert isSubpage(handle);
} finally {
head.unlock();
}
} else {
// normal
// runSize must be a multiple of pageSize
int runSize = arena.sizeIdx2size(sizeIdx);
handle = allocateRun(runSize);
if (handle < 0) {
return false;
}
assert !isSubpage(handle);
}
ByteBuffer nioBuffer = cachedNioBuffers != null? cachedNioBuffers.pollLast() : null;
initBuf(buf, nioBuffer, handle, reqCapacity, cache);
return true;
}
private long allocateRun(int runSize) {
int pages = runSize >> pageShifts;
int pageIdx = arena.pages2pageIdx(pages);
runsAvailLock.lock();
try {
//find first queue which has at least one big enough run
int queueIdx = runFirstBestFit(pageIdx);
if (queueIdx == -1) {
return -1;
}
//get run with min offset in this queue
IntPriorityQueue queue = runsAvail[queueIdx];
long handle = queue.poll();
assert handle != IntPriorityQueue.NO_VALUE;
handle <<= BITMAP_IDX_BIT_LENGTH;
assert !isUsed(handle) : "invalid handle: " + handle;
removeAvailRun0(handle);
handle = splitLargeRun(handle, pages);
int pinnedSize = runSize(pageShifts, handle);
freeBytes -= pinnedSize;
return handle;
} finally {
runsAvailLock.unlock();
}
}
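/**
 * Computes the smallest run size that is a multiple of pageSize and holds a whole number
 * of elements (effectively the least common multiple of the two, capped at maxElements).
 * A worked example under assumed defaults (pageSize = 8192, LOG2_QUANTUM = 4): for
 * elemSize = 1280, runSize grows 8192 -> 16384 -> ... -> 40960, and 40960 == 32 * 1280
 * finally divides evenly, so the subpage run spans 5 pages holding 32 elements.
 */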
private int calculateRunSize(int sizeIdx) {
int maxElements = 1 << pageShifts - SizeClasses.LOG2_QUANTUM;
int runSize = 0;
int nElements;
final int elemSize = arena.sizeIdx2size(sizeIdx);
//find lowest common multiple of pageSize and elemSize
do {
runSize += pageSize;
nElements = runSize / elemSize;
} while (nElements < maxElements && runSize != nElements * elemSize);
while (nElements > maxElements) {
runSize -= pageSize;
nElements = runSize / elemSize;
}
assert nElements > 0;
assert runSize <= chunkSize;
assert runSize >= elemSize;
return runSize;
}
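/**
 * Returns the index of the first non-empty queue at or above pageIdx, or -1 if none exists.
 * A completely free chunk short-circuits to the last queue, which is where the initial
 * whole-chunk run was inserted.
 */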
private int runFirstBestFit(int pageIdx) {
if (freeBytes == chunkSize) {
return arena.nPSizes - 1;
}
for (int i = pageIdx; i < arena.nPSizes; i++) {
IntPriorityQueue queue = runsAvail[i];
if (queue != null && !queue.isEmpty()) {
return i;
}
}
return -1;
}
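/**
 * Splits needPages pages off the front of the given free run and marks them as used.
 * For example, splitting a free 8-page run at offset 0 for needPages = 3 returns a used
 * handle covering pages [0..2] and reinserts a free 5-page run at offset 3.
 */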
private long splitLargeRun(long handle, int needPages) {
assert needPages > 0;
int totalPages = runPages(handle);
assert needPages <= totalPages;
int remPages = totalPages - needPages;
if (remPages > 0) {
int runOffset = runOffset(handle);
// keep track of trailing unused pages for later use
int availOffset = runOffset + needPages;
long availRun = toRunHandle(availOffset, remPages, 0);
insertAvailRun(availOffset, remPages, availRun);
// not avail
return toRunHandle(runOffset, needPages, 1);
}
//mark it as used
handle |= 1L << IS_USED_SHIFT;
return handle;
}
/**
* Create / initialize a new PoolSubpage of normCapacity. Any PoolSubpage created / initialized here is added to the
* subpage pool in the PoolArena that owns this PoolChunk.
*
* @param sizeIdx sizeIdx of normalized size
* @param head head of subpages
*
* @return the handle of the subpage allocation, or -1 if the underlying run allocation failed
*/
private long allocateSubpage(int sizeIdx, PoolSubpage head) {
//allocate a new run
int runSize = calculateRunSize(sizeIdx);
//runSize must be a multiple of pageSize
long runHandle = allocateRun(runSize);
if (runHandle < 0) {
return -1;
}
int runOffset = runOffset(runHandle);
assert subpages[runOffset] == null;
int elemSize = arena.sizeIdx2size(sizeIdx);
PoolSubpage<T> subpage = new PoolSubpage<T>(head, this, pageShifts, runOffset,
runSize(pageShifts, runHandle), elemSize);
subpages[runOffset] = subpage;
return subpage.allocate();
}
/**
* Free a subpage or a run of pages. When a subpage is freed from a PoolSubpage, it might be added back to the
* subpage pool of the owning PoolArena. If the subpage pool in the PoolArena has at least one other PoolSubpage
* of the given elemSize, we can completely free the owning page so it is available for subsequent allocations.
*
* @param handle handle to free
*/
void free(long handle, int normCapacity, ByteBuffer nioBuffer) {
if (isSubpage(handle)) {
int sizeIdx = arena.size2SizeIdx(normCapacity);
PoolSubpage<T> head = arena.findSubpagePoolHead(sizeIdx);
int sIdx = runOffset(handle);
PoolSubpage<T> subpage = subpages[sIdx];
// Obtain the head of the PoolSubpage pool that is owned by the PoolArena and synchronize on it.
// This is needed as we may add it back and so alter the linked-list structure.
head.lock();
try {
assert subpage != null && subpage.doNotDestroy;
if (subpage.free(head, bitmapIdx(handle))) {
//the subpage is still used, do not free it
return;
}
assert !subpage.doNotDestroy;
// Null out slot in the array as it was freed and we should not use it anymore.
subpages[sIdx] = null;
} finally {
head.unlock();
}
}
int runSize = runSize(pageShifts, handle);
//start free run
runsAvailLock.lock();
try {
// collapse contiguous runs; successfully collapsed runs
// will be removed from runsAvail and runsAvailMap
long finalRun = collapseRuns(handle);
//set run as not used
finalRun &= ~(1L << IS_USED_SHIFT);
//if it is a subpage, set it to run
finalRun &= ~(1L << IS_SUBPAGE_SHIFT);
insertAvailRun(runOffset(finalRun), runPages(finalRun), finalRun);
freeBytes += runSize;
} finally {
runsAvailLock.unlock();
}
if (nioBuffer != null && cachedNioBuffers != null &&
cachedNioBuffers.size() < PooledByteBufAllocator.DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK) {
cachedNioBuffers.offer(nioBuffer);
}
}
private long collapseRuns(long handle) {
return collapseNext(collapsePast(handle));
}
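/**
 * Repeatedly merges the run with any free run that ends directly before it. The lookup is
 * O(1) because insertAvailRun records the last page of every free run in runsAvailMap, so
 * the neighbour ending at runOffset - 1 is found with a single get.
 */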
private long collapsePast(long handle) {
for (;;) {
int runOffset = runOffset(handle);
int runPages = runPages(handle);
long pastRun = getAvailRunByOffset(runOffset - 1);
if (pastRun == -1) {
return handle;
}
int pastOffset = runOffset(pastRun);
int pastPages = runPages(pastRun);
//is contiguous
if (pastRun != handle && pastOffset + pastPages == runOffset) {
//remove past run
removeAvailRun(pastRun);
handle = toRunHandle(pastOffset, pastPages + runPages, 0);
} else {
return handle;
}
}
}
private long collapseNext(long handle) {
for (;;) {
int runOffset = runOffset(handle);
int runPages = runPages(handle);
long nextRun = getAvailRunByOffset(runOffset + runPages);
if (nextRun == -1) {
return handle;
}
int nextOffset = runOffset(nextRun);
int nextPages = runPages(nextRun);
//is contiguous
if (nextRun != handle && runOffset + runPages == nextOffset) {
//remove next run
removeAvailRun(nextRun);
handle = toRunHandle(runOffset, runPages + nextPages, 0);
} else {
return handle;
}
}
}
private static long toRunHandle(int runOffset, int runPages, int inUsed) {
return (long) runOffset << RUN_OFFSET_SHIFT
| (long) runPages << SIZE_SHIFT
| (long) inUsed << IS_USED_SHIFT;
}
void initBuf(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle, int reqCapacity,
PoolThreadCache threadCache) {
if (isSubpage(handle)) {
initBufWithSubpage(buf, nioBuffer, handle, reqCapacity, threadCache);
} else {
int maxLength = runSize(pageShifts, handle);
buf.init(this, nioBuffer, handle, runOffset(handle) << pageShifts,
reqCapacity, maxLength, arena.parent.threadCache());
}
}
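/**
 * Initializes the buffer from a subpage allocation. The byte offset is the start of the
 * run holding the subpage plus the element's slot within it:
 * offset = (runOffset << pageShifts) + bitmapIdx * elemSize.
 */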
void initBufWithSubpage(PooledByteBuf<T> buf, ByteBuffer nioBuffer, long handle, int reqCapacity,
PoolThreadCache threadCache) {
int runOffset = runOffset(handle);
int bitmapIdx = bitmapIdx(handle);
PoolSubpage<T> s = subpages[runOffset];
assert s.doNotDestroy;
assert reqCapacity <= s.elemSize : reqCapacity + "<=" + s.elemSize;
int offset = (runOffset << pageShifts) + bitmapIdx * s.elemSize;
buf.init(this, nioBuffer, handle, offset, reqCapacity, s.elemSize, threadCache);
}
void incrementPinnedMemory(int delta) {
assert delta > 0;
pinnedBytes.add(delta);
}
void decrementPinnedMemory(int delta) {
assert delta > 0;
pinnedBytes.add(-delta);
}
@Override
public int chunkSize() {
return chunkSize;
}
@Override
public int freeBytes() {
if (this.unpooled) {
return freeBytes;
}
runsAvailLock.lock();
try {
return freeBytes;
} finally {
runsAvailLock.unlock();
}
}
public int pinnedBytes() {
return (int) pinnedBytes.value();
}
@Override
public String toString() {
final int freeBytes;
if (this.unpooled) {
freeBytes = this.freeBytes;
} else {
runsAvailLock.lock();
try {
freeBytes = this.freeBytes;
} finally {
runsAvailLock.unlock();
}
}
return new StringBuilder()
.append("Chunk(")
.append(Integer.toHexString(System.identityHashCode(this)))
.append(": ")
.append(usage(freeBytes))
.append("%, ")
.append(chunkSize - freeBytes)
.append('/')
.append(chunkSize)
.append(')')
.toString();
}
void destroy() {
arena.destroyChunk(this);
}
static int runOffset(long handle) {
return (int) (handle >> RUN_OFFSET_SHIFT);
}
static int runSize(int pageShifts, long handle) {
return runPages(handle) << pageShifts;
}
static int runPages(long handle) {
return (int) (handle >> SIZE_SHIFT & 0x7fff);
}
static boolean isUsed(long handle) {
return (handle >> IS_USED_SHIFT & 1) == 1L;
}
static boolean isRun(long handle) {
return !isSubpage(handle);
}
static boolean isSubpage(long handle) {
return (handle >> IS_SUBPAGE_SHIFT & 1) == 1L;
}
static int bitmapIdx(long handle) {
return (int) handle;
}
}