/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.common.util;

import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;

import java.util.Arrays;
import java.util.Locale;

import static org.elasticsearch.common.recycler.Recyclers.concurrent;
import static org.elasticsearch.common.recycler.Recyclers.concurrentDeque;
import static org.elasticsearch.common.recycler.Recyclers.dequeFactory;
import static org.elasticsearch.common.recycler.Recyclers.none;

/** A recycler of fixed-size pages. */
public class PageCacheRecycler {

    public static final Setting<Type> TYPE_SETTING = new Setting<>(
        "cache.recycler.page.type",
        Type.CONCURRENT.name(),
        Type::parse,
        Property.NodeScope
    );
    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.memorySizeSetting(
        "cache.recycler.page.limit.heap",
        "10%",
        Property.NodeScope
    );
    public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting(
        "cache.recycler.page.weight.bytes",
        1d,
        0d,
        Property.NodeScope
    );
    public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting(
        "cache.recycler.page.weight.longs",
        1d,
        0d,
        Property.NodeScope
    );
    public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting(
        "cache.recycler.page.weight.ints",
        1d,
        0d,
        Property.NodeScope
    );
    // object pages are less useful to us so we give them a lower weight by default
    public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting(
        "cache.recycler.page.weight.objects",
        0.1d,
        0d,
        Property.NodeScope
    );
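
    // Configuration sketch (hedged; the values are illustrative, not taken from this file):
    // these are node-scope settings, so they would normally be read from node Settings, e.g.
    //
    //   Settings settings = Settings.builder()
    //       .put("cache.recycler.page.type", "concurrent")    // parsed by Type.parse, case-insensitive
    //       .put("cache.recycler.page.limit.heap", "10%")     // cap the recycler at 10% of heap
    //       .put("cache.recycler.page.weight.objects", "0.2") // raise the object-page share
    //       .build();
    //   PageCacheRecycler recycler = new PageCacheRecycler(settings);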

    /** Page size in bytes: 16KB */
    public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
    public static final int OBJECT_PAGE_SIZE = PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
    public static final int LONG_PAGE_SIZE = PAGE_SIZE_IN_BYTES / Long.BYTES;
    public static final int INT_PAGE_SIZE = PAGE_SIZE_IN_BYTES / Integer.BYTES;
    public static final int BYTE_PAGE_SIZE = PAGE_SIZE_IN_BYTES;
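
    // For reference, with the 16KB page above: LONG_PAGE_SIZE = 16384 / 8 = 2048 and
    // INT_PAGE_SIZE = 16384 / 4 = 4096. OBJECT_PAGE_SIZE depends on the JVM's reference
    // size: 4096 with 4-byte compressed oops, 2048 with 8-byte references.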

    private final Recycler<byte[]> bytePage;
    private final Recycler<int[]> intPage;
    private final Recycler<long[]> longPage;
    private final Recycler<Object[]> objectPage;

    public static final PageCacheRecycler NON_RECYCLING_INSTANCE;

    static {
        NON_RECYCLING_INSTANCE = new PageCacheRecycler(Settings.builder().put(LIMIT_HEAP_SETTING.getKey(), "0%").build());
    }
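
    // Note: with a "0%" heap limit every per-type page count below works out to zero, so
    // build(...) falls back to none(c) for every page type and NON_RECYCLING_INSTANCE
    // simply allocates a fresh page on each obtain(), with no pooling at all.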

    public PageCacheRecycler(Settings settings) {
        final Type type = TYPE_SETTING.get(settings);
        final long limit = LIMIT_HEAP_SETTING.get(settings).getBytes();
        final int allocatedProcessors = EsExecutors.allocatedProcessors(settings);

        // We have a global amount of memory that we need to divide across data types.
        // Since some types are more useful than others we give them different weights.
        // Trying to store all of them in a single stack would be problematic because e.g.
        // a workload could fill the recycler with only byte[] pages and then another
        // workload that would work with double[] pages couldn't recycle them because there
        // is no space left in the stack/queue. LRU/LFU policies are not an option either
        // because they would make obtain/release too costly: we really need constant-time
        // operations.
        // Ultimately a better solution would be to only store one kind of data and have the
        // ability to interpret it either as a source of bytes, doubles, longs, etc., e.g. thanks
        // to direct ByteBuffers or sun.misc.Unsafe on a byte[], but this would have other issues
        // that would need to be addressed, such as garbage collection of native memory or safety
        // of Unsafe writes.
        final double bytesWeight = WEIGHT_BYTES_SETTING.get(settings);
        final double intsWeight = WEIGHT_INT_SETTING.get(settings);
        final double longsWeight = WEIGHT_LONG_SETTING.get(settings);
        final double objectsWeight = WEIGHT_OBJECTS_SETTING.get(settings);

        final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
        final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / PAGE_SIZE_IN_BYTES);
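        // Worked example (hedged; assumes the default weights 1:1:1:0.1 and a 1 GiB heap):
        // the default 10% limit is ~107,374,182 bytes, so maxPageCount = 6553 16KB pages.
        // Each of the byte/int/long pools below then gets (int) (1 * 6553 / 3.1) = 2113
        // pages, and the object pool gets (int) (0.1 * 6553 / 3.1) = 211 pages.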

        final int maxBytePageCount = (int) (bytesWeight * maxPageCount / totalWeight);
        bytePage = build(type, maxBytePageCount, allocatedProcessors, new AbstractRecyclerC<byte[]>() {
            @Override
            public byte[] newInstance() {
                return new byte[BYTE_PAGE_SIZE];
            }

            @Override
            public void recycle(byte[] value) {
                // nothing to do
            }
        });

        final int maxIntPageCount = (int) (intsWeight * maxPageCount / totalWeight);
        intPage = build(type, maxIntPageCount, allocatedProcessors, new AbstractRecyclerC<int[]>() {
            @Override
            public int[] newInstance() {
                return new int[INT_PAGE_SIZE];
            }

            @Override
            public void recycle(int[] value) {
                // nothing to do
            }
        });

        final int maxLongPageCount = (int) (longsWeight * maxPageCount / totalWeight);
        longPage = build(type, maxLongPageCount, allocatedProcessors, new AbstractRecyclerC<long[]>() {
            @Override
            public long[] newInstance() {
                return new long[LONG_PAGE_SIZE];
            }

            @Override
            public void recycle(long[] value) {
                // nothing to do
            }
        });

        final int maxObjectPageCount = (int) (objectsWeight * maxPageCount / totalWeight);
        objectPage = build(type, maxObjectPageCount, allocatedProcessors, new AbstractRecyclerC<Object[]>() {
            @Override
            public Object[] newInstance() {
                return new Object[OBJECT_PAGE_SIZE];
            }

            @Override
            public void recycle(Object[] value) {
                Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array
            }
        });
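        // Each per-type count above was floored by its (int) cast, so the summed page
        // budget can only undershoot the configured limit; the assert below checks this.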

        assert PAGE_SIZE_IN_BYTES * (maxBytePageCount + maxIntPageCount + maxLongPageCount + maxObjectPageCount) <= limit;
    }

    public Recycler.V<byte[]> bytePage(boolean clear) {
        final Recycler.V<byte[]> v = bytePage.obtain();
        if (v.isRecycled() && clear) {
            Arrays.fill(v.v(), (byte) 0);
        }
        return v;
    }
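
    // Usage sketch (hedged: relies on Recycler.V extending Releasable, as it does in this
    // codebase, so closing the handle returns the page to the pool):
    //
    //   try (Recycler.V<byte[]> page = pageCacheRecycler.bytePage(true)) {
    //       byte[] buf = page.v(); // 16KB page; recycled pages are wiped when clear == true,
    //       // ... use buf ...     // fresh pages are already zeroed by the JVM
    //   } // close() releases the page back to the recycler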

    public Recycler.V<int[]> intPage(boolean clear) {
        final Recycler.V<int[]> v = intPage.obtain();
        if (v.isRecycled() && clear) {
            Arrays.fill(v.v(), 0);
        }
        return v;
    }

    public Recycler.V<long[]> longPage(boolean clear) {
        final Recycler.V<long[]> v = longPage.obtain();
        if (v.isRecycled() && clear) {
            Arrays.fill(v.v(), 0L);
        }
        return v;
    }

    public Recycler.V<Object[]> objectPage() {
        // object pages are cleared on release anyway
        return objectPage.obtain();
    }

    private static <T> Recycler<T> build(Type type, int limit, int availableProcessors, Recycler.C<T> c) {
        final Recycler<T> recycler;
        if (limit == 0) {
            recycler = none(c);
        } else {
            recycler = type.build(c, limit, availableProcessors);
        }
        return recycler;
    }

    public enum Type {
        QUEUE {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return concurrentDeque(c, limit);
            }
        },
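        // CONCURRENT (the default, per TYPE_SETTING above) stripes the page budget across
        // allocated processors to reduce contention: each of the availableProcessors deques
        // holds at most limit / availableProcessors pages (integer division, so a small
        // remainder of the budget may go unused).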
        CONCURRENT {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return concurrent(dequeFactory(c, limit / availableProcessors), availableProcessors);
            }
        },
        NONE {
            @Override
            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
                return none(c);
            }
        };

        public static Type parse(String type) {
            try {
                return Type.valueOf(type.toUpperCase(Locale.ROOT));
            } catch (IllegalArgumentException e) {
                throw new IllegalArgumentException("no type support [" + type + "]");
            }
        }
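
        // e.g. Type.parse("Concurrent") == CONCURRENT: input is upper-cased with
        // Locale.ROOT, and unknown names fail fast with IllegalArgumentException.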

        abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors);
    }
}