/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.runtime.state.gemini;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.runtime.state.gemini.engine.GeminiDB;
import org.apache.flink.runtime.state.gemini.engine.filecache.FileCache;
import org.apache.flink.runtime.state.gemini.engine.page.PageConstants;

import static org.apache.flink.configuration.ConfigOptions.key;

/**
 * A collection of all configuration options that relate to {@link GeminiDB}.
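 *
 * <p>All option keys share the {@code state.backend.gemini} prefix. A minimal usage
 * sketch (the values are illustrative only):
 *
 * <pre>{@code
 * Configuration config = new Configuration();
 * config.setString(GeminiOptions.LOCAL_PATH.key(), "/tmp/gemini");
 * config.setBoolean(GeminiOptions.USE_OFFHEAP.key(), true);
 * }</pre>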
 */
public class GeminiOptions {

	private static final String KEY_PREFIX_NAME = "state.backend.gemini";

	// options for storage path =============================================================

	public static final ConfigOption<String> DFS_PATH =
		key(buildKey("dfs.dir"))
		.noDefaultValue()
		.withDescription("The DFS directory where GeminiDB puts its files.");

	/** The local directory (on the TaskManager) where GeminiDB puts its files. */
	public static final ConfigOption<String> LOCAL_PATH =
		key(buildKey("local.dir"))
		.noDefaultValue()
		.withDescription("The local directory (on the TaskManager) where GeminiDB puts its files.");

	// options for metrics =============================================================

	public static final ConfigOption<Integer> SAMPLE_COUNT =
		key(buildKey("metric.sample.count"))
		.defaultValue(100)
		.withDescription("The sampling rate of state-related metrics, based on action count.");

	public static final ConfigOption<Integer> HISTOGRAM_WINDOW_SIZE =
		key(buildKey("metric.histogram.window"))
		.defaultValue(5)
		.withDescription("The window size used for state histogram metrics.");

	// options for DB threads =============================================================

	public static final ConfigOption<Integer> REGION_THREAD_NUM =
		key(buildKey("region.thread.num"))
		.defaultValue(4)
		.withDescription("Number of threads used to add, update and remove pages from PageStores. " +
			"Multiple regions may share a thread.");

	public static final ConfigOption<Integer> FLUSH_THREAD_NUM =
		key(buildKey("flush.thread.num"))
		.defaultValue(4)
		.withDescription("Number of threads used to flush pages to files.");

	// options for DB memory usage =============================================================

	public static final ConfigOption<Boolean> USE_OFFHEAP =
		key(buildKey("use.offheap"))
		.defaultValue(false)
		.withDescription("Whether to use off-heap memory.");

	public static final ConfigOption<Boolean> USE_OFFHEAP_FOR_READ =
		key(buildKey("use.offheap.for-read"))
		.defaultValue(false)
		.withDescription("Whether to use off-heap memory when reading data from local files.");

	public static final ConfigOption<String> HEAP_SIZE =
		key(buildKey("heap.size"))
		.noDefaultValue()
		.withDescription("Size of heap the backend can use; must be positive.");

	public static final ConfigOption<String> OFFHEAP_SIZE =
		key(buildKey("offheap.size"))
		.noDefaultValue()
		.withDescription("Size of off-heap memory the backend can use; must be positive.");

	public static final ConfigOption MEMORY_RATIO =
		key(buildKey("memory.ratio"))
		.defaultValue(PageConstants.DEFAULT_MEMORY_RATIO)
		.withDescription("Ratio of memory used by DB relative to JVM's heap/offheap. " +
			"This option only works when " + HEAP_SIZE.key() + " or " + OFFHEAP_SIZE.key() +
			" is not set.");

	public static final ConfigOption TOTAL_WRITEBUFFER_RATE =
		key(buildKey("total.writebuffer.rate"))
		.defaultValue(PageConstants.DEFAULT_TOTAL_WRITEBUFFER_RATE)
		.withDescription("Ratio of memory usage for all writebuffers relative to JVM heap.");

	public static final ConfigOption<String> WRITE_BUFFER_SIZE =
		key(buildKey("writebuffer.size"))
			.defaultValue("640kb")
			.withDescription("Amount of data for a region to store in memory before it is flushed to a file.");

	public static final ConfigOption HUGE_PAGE_THRESHOLD =
		key(buildKey("huge.page.threshold"))
			.defaultValue(PageConstants.DEFAULT_HUGE_PAGE_SIZE_THRESHOLD)
			.withDescription("A page will be treated as a huge page if its size exceeds this threshold.");

	public static final ConfigOption TOTAL_HEAP_LOW_MARK_RATE =
		key(buildKey("total.heap.low_mark.rate"))
		.defaultValue(PageConstants.DEFAULT_TOTAL_HEAP_LOW_MARK_RATE)
		.withDescription("Try to fill sample pool if memory usage exceeds this watermark.");

	public static final ConfigOption TOTAL_HEAP_MIDDLE_MARK_RATE =
		key(buildKey("total.heap.middle_mark.rate"))
		.defaultValue(PageConstants.DEFAULT_TOTAL_HEAP_MIDDLE_MARK_RATE)
		.withDescription("Some pages will be flushed if memory usage exceeds this watermark.");

	public static final ConfigOption TOTAL_HEAP_HIGH_MARK_RATE =
		key(buildKey("total.heap.high_mark.rate"))
		.defaultValue(PageConstants.DEFAULT_TOTAL_HEAP_HIGH_MARK_RATE)
		.withDescription("Some pages will be evicted if memory usage exceeds this watermark.");

	public static final ConfigOption<Integer> ALLOCATOR_DIRECT_ARENA =
		key(buildKey("allocator.direct.arena.num"))
		.defaultValue(8)
		.withDescription("Number of direct arenas for the Netty allocator.");

	// options for memory flush (from write buffer to page) =========================================================

	public static final ConfigOption<Integer> FLUSHING_SEGMENT =
		key(buildKey("flushing.segment.num"))
		.defaultValue(3)
		.withDescription("Maximum number of flushing segments to allow for each region.");

	public static final ConfigOption<Float> FLUSHING_SEGMENT_RATIO = ConfigOptions.key(
		buildKey("flushing.segment.total.ratio"))
		.defaultValue(1.0f).withDescription(
			"This option limits the total number of allowed flushing segments in all regions together "
				+ "with " + FLUSHING_SEGMENT.key() + ". For example, if DB is responsible for 10 regions, "
				+ FLUSHING_SEGMENT.key() + " is set to 3 and this option is set to 1.0, the total number of allowed "
				+ "flushing segments will be 30, but if this option is set to 0.5, the total number of allowed flushing segments will be 15.");

	// options for memory cache =============================================================

	/**
	 * Note: once our own allocator is available, data read from disk will enter the
	 * cache directly, so a size of 1024 will be enough. Until then, this number can be
	 * increased (for example to 10000) to improve performance.
	 */
	public static final ConfigOption<Integer> READ_LRU_SIZE = ConfigOptions.key(
		buildKey("read.lru.size"))
		.defaultValue(1024)
		.withDescription("Size of the LRU cache.");

	public static final ConfigOption<Integer> MAX_PREPARED_FLUSH_SIZE =
		key(buildKey("max.prepared.flush.size"))
		.defaultValue(2097152)
		.withDescription("Max size of prepared pages, default is 2MB.");

	public static final ConfigOption<Integer> EVICT_POOL_FACTOR =
		key(buildKey("evict.pool.factor"))
		.defaultValue(20)
		.withDescription("Ratio of pool size relative to " + MAX_PREPARED_FLUSH_SIZE.key() + ".");

	public static final ConfigOption<Integer> CACHE_TIME_PER_TICK =
		key(buildKey("time.per.tick.second"))
		.defaultValue(20)
		.withDescription("Time in seconds per tick for scoring pages.");

	public static final ConfigOption<Integer> SORTED_LIST_COUNT_FOR_FLUSH =
		key(buildKey("evict.page.pool.sorted.list.min.count"))
			.defaultValue(16)
			.withDescription("A sorted list is generated by sorting the evict sample pool; " +
				"not all of its items are used when looking for colder pages on a best-effort basis.");

	public static final ConfigOption<Integer> VM_PRINT_TICK =
		key(buildKey("vm.print.tick"))
			.defaultValue(30)
			.withDescription("Number of ticks after which to print the VM info.");

	public static final ConfigOption<Boolean> VM_PRINT_AUDIT_INFO =
		key(buildKey("vm.print.audit.info"))
			.defaultValue(false)
			.withDescription("Whether to print audit info; intended for development debugging.");

	public static final ConfigOption<Boolean> VM_EVICT_BASE_ON_COMPOSITE_PAGE_ADDRESS =
		key(buildKey("evict.base.on.composite.page.address"))
			.defaultValue(true)
			.withDescription("Whether eviction adds PageAddressComposite into the sample pool.");

	// options for split =============================================================

	public static final ConfigOption BUCKET_INIT_NUM =
		key(buildKey("bucket.init.num"))
		.defaultValue(PageConstants.DEFAULT_BUCKET_NUM)
		.withDescription("Initial number of buckets in page index.");

	public static final ConfigOption SPLIT_SIZE_THRESHOLD =
		key(buildKey("split.size.threshold"))
		.defaultValue(PageConstants.DEFAULT_SPLIT_PAGE_SIZE_THRESHOLD)
		.withDescription("Threshold of bucket size that triggers a split.");

	public static final ConfigOption INDEX_COUNT_HIGH_MARK =
		key(buildKey("index.count.high.mark"))
		.defaultValue(PageConstants.DEFAULT_INDEX_COUNT_HIGH_MARK)
		.withDescription("Splitting will be forbidden if the total number of buckets exceeds this watermark.");

	public static final ConfigOption INDEX_COUNT_LOW_MARK =
		key(buildKey("index.count.low.mark"))
		.defaultValue(PageConstants.DEFAULT_INDEX_COUNT_LOW_MARK)
		.withDescription("Currently this watermark is only used for monitoring.");

	// options for page compaction =============================================================

	public static final ConfigOption<Integer> COMPACTION_THREAD_NUM =
		key(buildKey("compaction.thread.num"))
		.defaultValue(4)
		.withDescription("Number of threads used to compact pages.");

	public static final ConfigOption<Integer> MINOR_COMPACTION_THRESHOLD =
		key(buildKey("minor.compaction.threshold"))
		.defaultValue(3).withDescription("A minor compaction will be triggered if the number of continuous"
			+ " in-memory pages in a logical page chain reaches the threshold.");

	public static final ConfigOption<Integer> MINOR_COMPACTION_MAX_RUNNING =
		key(buildKey("minor.compaction.max-running"))
		.defaultValue(128)
		.withDescription("Max number of running minor compactions.");

	public static final ConfigOption<Integer> MAJOR_COMPACTION_THRESHOLD = ConfigOptions.key(
		buildKey("major.compaction.threshold"))
		.defaultValue(5)
		.withDescription("A major compaction will be triggered if the number of pages in a" +
			" logical page chain reaches the threshold.");

	public static final ConfigOption<Integer> MAJOR_COMPACTION_MAX_RUNNING =
		key(buildKey("major.compaction.max-running"))
		.defaultValue(8)
		.withDescription("Max number of running major compactions.");

	public static final ConfigOption LOGIC_CHAIN_INIT_LEN =
		key(buildKey("logical.page.chain.init.len"))
		.defaultValue(PageConstants.DEFAULT_LOGIC_TABLE_CHAIN_LEN)
		.withDescription("Initial length of logical page chain.");

	// options for lru cache ================================================================

	public static final ConfigOption<Integer> LRU_INTO_MAIN_CACHE_THREAD_NUM = ConfigOptions.key(
		buildKey("lru.into.main-cache.thread.num"))
		.defaultValue(2)
		.withDescription("Number of threads used to fetch data from the LRU cache into the main cache.");

	public static final ConfigOption<Integer> LRU_INTO_MAIN_CACHE_SLEEP_MS = ConfigOptions.key(
		buildKey("lru.into.main-cache.sleep.ms"))
		.defaultValue(5_000)
		.withDescription("Minimum time in milliseconds to sleep between two operations that fetch data from the LRU cache into the main cache.");

	public static final ConfigOption<Boolean> LRU_ADD_INTO_MAIN_WHEN_SPLITTING = ConfigOptions.key(
		buildKey("lru.add-into-main.when.split"))
		.defaultValue(true)
		.withDescription("Whether to fetch data from the LRU cache into the main cache while a page chain is splitting.");

	public static final ConfigOption<Boolean> ENABLE_LRU_INTO_MAIN_CACHE = ConfigOptions.key(
		buildKey("enable.lru-into-main"))
		.defaultValue(false)
		.withDescription("Whether to enable loading pages from the LRU cache into the main cache.");

	public static final ConfigOption<Boolean> ENABLE_LRU_ACCESS_MODE = ConfigOptions.key(
		buildKey("enable.lru.access-mode"))
		.defaultValue(false)
		.withDescription("Whether to update the LRU cache order when an entry in the cache is accessed.");

	public static final ConfigOption<Boolean> LRU_CACHE_EVEN_EVICT = ConfigOptions.key(
		buildKey("enable.lru.even-evict"))
		.defaultValue(false)
		.withDescription("Whether to evict all regions evenly.");

	// options for file manager =============================================================

	public static final ConfigOption<String> MAX_FILE_SIZE =
		key(buildKey("file.max-size"))
			.defaultValue("128mb")
			.withDescription("Maximum size of a data file. A file will be closed for writing once its size reaches this limit.");

	public static final ConfigOption<Long> FILE_ALIVE_TIME_AFTER_NO_DATA_REFERENCE =
		key(buildKey("file.alive.time.after.no.data.reference.ms"))
		.defaultValue(30000L)
		.withDescription("How long a file is kept alive after there is no data reference to it.");

	public static final ConfigOption<Long> FILE_DELETION_CHECK_INTERVAL =
		key(buildKey("file.deletion.check.interval.ms"))
		.defaultValue(20000L)
		.withDescription("Interval to check whether files should be deleted.");

	/**
	 * TODO #SR
	 *  1. Currently, creating new file writers is disabled only after *all* writers
	 *     have failed. If there are 2 file writers in snapshotEventExecutorGroup, and
	 *     one is good while the other is bad, new file writers will still be created.
	 *  2. Caching on DFS is not supported yet.
	 */
	public static final ConfigOption<Integer> FILE_FAIL_COUNT_THRESHOLD =
		key(buildKey("file.fail-count.threshold"))
		.defaultValue(3)
		.withDescription("Threshold for the failure count across all file writers. After it is exceeded, " +
			"no new file writers will be created for a period of time.");

	public static final ConfigOption<Integer> FILE_RETRY_INTERVAL =
		key(buildKey("file.retry.interval.second"))
		.defaultValue(3600)
		.withDescription("Interval in seconds after which creating new file writers is retried, once it " +
			"has been disabled because the file failure count exceeded the threshold.");

	public static final ConfigOption<Integer> WRITER_FAIL_COUNT_THRESHOLD =
		key(buildKey("writer.fail-count.threshold"))
		.defaultValue(5)
		.withDescription("A file writer becomes invalid if its number of write failures exceeds this threshold.");

	// options for file cleaner =============================================================

	public static final ConfigOption<Integer> FILE_CLEAN_THREAD_NUM =
		key(buildKey("file.clean.thread.num"))
		.defaultValue(1)
		.withDescription("Number of threads used to clean useless files. Currently only one thread is supported.");

	public static final ConfigOption<Long> FILE_CLEAN_CHECK_INTERVAL =
		key(buildKey("file.clean.check.interval.ms"))
		.defaultValue(60000L)
		.withDescription("Interval to check whether there are files to clean.");

	// options for compression =============================================================

	/**
	 * TODO: changing the compression is currently not supported, and in-page compression
	 * only supports None or LZ4.
	 */
	public static final ConfigOption<String> PAGE_FLUSH_LOCAL_COMPRESSION =
		key(buildKey("page.flush.local.compression"))
		.defaultValue("None")
		.withDescription("Compression used when flushing pages to local files.");

	public static final ConfigOption<String> PAGE_FLUSH_DFS_COMPRESSION =
		key(buildKey("page.flush.dfs.compression"))
		.defaultValue("Lz4")
		.withDescription("Compression used when flushing pages to DFS files.");

	public static final ConfigOption<String> IN_PAGE_COMPRESSION =
		key(buildKey("compression.in.page"))
		.defaultValue("None")
		.withDescription("Compression used for in-memory pages.");

	public static final ConfigOption<Integer> WHOLE_PAGE_COMPRESS_THRESHOLD =
		key(buildKey("page.compress.threshold"))
		.defaultValue(1024)
		.withDescription("Threshold of page size used to decide whether to compress a page "
			+ "when flushing it to local or DFS files.");

	public static final ConfigOption PAGE_SIZE_RATE_BETWEEN_POJO_HEAP = ConfigOptions.key(
		buildKey("pojo.memory.estimate.ratio"))
		.defaultValue(PageConstants.DEFAULT_PAGE_SIZE_RATE_BETWEEN_POJO_HEAP)
		.withDescription("Ratio of size between a POJO and its serialized binary.");

	// options for map split =============================================================

	public static final ConfigOption<Boolean> DATA_PAGE_MAP_SPLIT_ENABLED =
		key(buildKey("page.map.split.enabled"))
		.defaultValue(true)
		.withDescription("Whether to enable map split.");

	public static final ConfigOption<String> MAP_SPLIT_THRESHOLD_SIZE =
		key(buildKey("page.map.split.size.threshold"))
		.defaultValue("8KB")
		.withDescription("Threshold of map size to trigger a split.");

	public static final ConfigOption<Integer> MAP_SPLIT_SUB_MAP_SIZE =
		key(buildKey("page.map.split.sub.map.size"))
		.defaultValue(-1)
		.withDescription("Sub-map size after splitting. -1 indicates that it will be half of " +
			MAP_SPLIT_THRESHOLD_SIZE.key() + ".");
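	// Sketch of how the -1 sentinel could be resolved (illustrative, not the engine's actual code):
	//   long subMapSize = configured > 0 ? configured : thresholdSize / 2;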

	public static final ConfigOption<Integer> MAP_SPLIT_MAX_SUB_MAP_NUM =
		key(buildKey("page.map.split.max.sub-maps"))
		.defaultValue(65536)
		.withDescription("Max number of sub-maps when a map is split.");

	// options for file cache =============================================================

	public static final ConfigOption<String> FILE_CACHE_TYPE =
		key(buildKey("file.cache.type"))
		.defaultValue(FileCache.FileCacheType.INFINITE.name())
		.withDescription("Type of file cache to use. " + FileCache.FileCacheType.NONE.name()
			+ " indicates that no file cache is used, " + FileCache.FileCacheType.INFINITE.name()
			+ " indicates that the cache has infinite capacity, and " + FileCache.FileCacheType.LIMITED.name()
			+ " indicates that the capacity of the file cache is limited.");

	public static final ConfigOption<String> FILE_CACHE_CAPACITY =
		key(buildKey("file.cache.capacity"))
			.noDefaultValue()
			.withDescription("Capacity of the file cache. This option takes effect only when the file cache type"
				+ " is LIMITED, and is ignored for other types.");

	// options for file compaction ================================================

	public static final ConfigOption<Boolean> FILE_COMPACTION =
		key(buildKey("file.compaction"))
		.defaultValue(false)
		.withDescription("Whether file compaction is enabled.");

	public static final ConfigOption<Float> FILE_COMPACTION_TRIGGER_RATIO =
		key(buildKey("file.compaction.trigger.ratio"))
		.defaultValue(4.0f)
		.withDescription("A compaction process will be triggered when the space amplification reaches this ratio.");

	public static final ConfigOption<Float> FILE_COMPACTION_TARGET_RATIO =
		key(buildKey("file.compaction.target.ratio"))
		.defaultValue(2.0f)
		.withDescription("A compaction process will exit when the space amplification is reduced to this ratio.");

	public static final ConfigOption<Long> FILE_AMPLIFICATION_CHECK_INTERVAL_MS =
		key(buildKey("file.amplification.check.interval.ms"))
		.defaultValue(120000L)
		.withDescription("Interval in milliseconds to periodically check the space amplification.");

	// options for snapshot ================================================

	public static final ConfigOption<Integer> SNAPSHOT_THREAD_NUM =
		key(buildKey("snapshot.thread.num"))
		.defaultValue(3)
		.withDescription("Number of threads used to flush pages to DFS for a snapshot.");

	public static final ConfigOption<Boolean> SNAPSHOT_COMPACTION =
		key(buildKey("snapshot.compaction"))
		.defaultValue(true)
		.withDescription("Whether snapshot compaction is enabled.");

	public static final ConfigOption<Float> SNAPSHOT_COMPACTION_TARGET_RATIO =
		key(buildKey("snapshot.compaction.target.ratio"))
		.defaultValue(2f)
		.withDescription("The snapshot amplification should be no more than this ratio.");

	public static final ConfigOption<Integer> SNAPSHOT_BATCH_FLUSH_NUM_PAGE =
		key(buildKey("snapshot.batch.flush.num.page"))
		.defaultValue(10)
		.withDescription("Number of pages in a batch to flush to DFS.");

	public static final ConfigOption<String> SNAPSHOT_BATCH_FLUSH_DATA_SIZE =
		key(buildKey("snapshot.batch.flush.data.size"))
			.defaultValue("64kb") // dfs.write.packet.size defaults to 64K
			.withDescription("Size of data in a batch to flush to DFS. It works together with " +
				SNAPSHOT_BATCH_FLUSH_NUM_PAGE.key() + " to decide when to flush a batch to DFS.");
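	// Assumed batching semantics: a batch is flushed to DFS once it holds
	// SNAPSHOT_BATCH_FLUSH_NUM_PAGE pages or its data reaches SNAPSHOT_BATCH_FLUSH_DATA_SIZE,
	// whichever comes first.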

	public static final ConfigOption<Boolean> SNAPSHOT_SYNC_WHEN_BATCH_FLUSH =
		key(buildKey("snapshot.sync.when.batch.flush"))
		.defaultValue(false)
		.withDescription("Whether to sync on each batch flush.");

	// options for restore ================================================

	public static final ConfigOption<Boolean> RESTORE_FETCH_FILES =
		key(buildKey("restore.pre-fetch-files"))
		.defaultValue(false)
		.withDescription("Whether Gemini should prefetch all files when restoring without local recovery or a first download.");

	public static final ConfigOption<Integer> FETCH_FILES_THREAD_NUM =
		key(buildKey("restore.thread.num"))
		.defaultValue(2)
		.withDescription("The number of threads used to download files from DFS; only takes effect when '" +
			RESTORE_FETCH_FILES.key() + "' is enabled.");

	// some unclassified options =============================================================

	public static final ConfigOption<Long> TTL =
		key(buildKey("ttl.ms"))
			.defaultValue(-1L)
			.withDescription("Time to live of state.");

	public static final ConfigOption<String> COMPARATOR_TYPE =
		key(buildKey("comparator.type"))
			.defaultValue("bytes")
			.withDescription("Comparator type for SortedMapState. 'bytes' indicates a comparator" +
				" based on byte order, and 'user' indicates a user-provided Java comparator.");

	public static final ConfigOption<Boolean> READ_COPY =
		key(buildKey("read.copy"))
			.defaultValue(true)
			.withDescription("Whether to copy state when reading from the write buffer.");

	public static final ConfigOption<Boolean> WRITE_COPY =
		key(buildKey("write.copy"))
			.defaultValue(true)
			.withDescription("Whether to copy state on state writes.");

	public static final ConfigOption<Long> THREAD_SLEEP_TIME_NS =
		key(buildKey("thread.sleep.ns"))
		.defaultValue(1000_000L)
		.withDescription("Used in GeminiEventExecutor to decide the interval at which to poll the task queue.");

	public static final ConfigOption<Boolean> PREFETCH_ENABLE =
		key(buildKey("prefetch.enable"))
		.defaultValue(true)
		.withDescription("Whether to enable prefetch.");

	public static final ConfigOption<Long> PREFETCH_THREAD_SLEEP_TIME_NS =
		key(buildKey("prefetch.sleep.ns"))
		.defaultValue(50L)
		.withDescription("Interval at which the prefetch event executor polls for tasks.");

	public static final ConfigOption<Boolean> CHECKSUM_ENABLE =
		key(buildKey("checksum.enable"))
		.defaultValue(true)
		.withDescription("Whether to enable checksum.");

	private static String buildKey(String name) {
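		// e.g. buildKey("dfs.dir") returns "state.backend.gemini.dfs.dir"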
		return KEY_PREFIX_NAME + "." + name;
	}

	public static final ConfigOption<Integer> PERSISTENCE_BATCH_SIZE = ConfigOptions
		.key(buildKey("persistence.batch.size"))
		.defaultValue(4 * 1024 * 1024)
		.withDescription("The batch size used when persisting pages to DFS.");

	public static final ConfigOption<Integer> PERSISTENCE_FORCE_CACHE_SIZE = ConfigOptions
		.key(buildKey("persistence.force.cache.size"))
		.defaultValue(2 * 1024 * 1024)
		.withDescription("The size threshold used when persisting pages to the cache.");

	public static final ConfigOption<Integer> PERSISTENCE_MAX_RUNNING_TASK = ConfigOptions
		.key(buildKey("persistence.max.running.task"))
		.defaultValue(2)
		.withDescription("Max number of running persistence tasks.");

	public static final ConfigOption<String> PERSISTENCE_TYPE = ConfigOptions
		.key(buildKey("persistence.type"))
		.defaultValue("bothDfsAndSyncHugeToCache")
		.withDescription("Persistence type: off|asyncDfs|syncHugeToCache|bothDfsAndSyncHugeToCache (default).");

	public static final ConfigOption<Float> VM_BLOOM_FILTER_MEM_RATE = ConfigOptions
		.key(buildKey("vm.bloom.filter.mem.rate"))
		.defaultValue(0.05f)
		.withDescription("Ratio of memory used for the bloom filter.");

	public static final ConfigOption<Boolean> VM_BLOOM_FILTER_ENABLE = ConfigOptions
		.key(buildKey("vm.bloom.filter.enable"))
		.defaultValue(false)
		.withDescription("Whether to enable the bloom filter.");
}