/*-
 * Copyright (C) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 *
 * This file was distributed by Oracle as part of a version of Oracle Berkeley
 * DB Java Edition made available at:
 *
 * http://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html
 *
 * Please see the LICENSE file included in the top-level directory of the
 * appropriate version of Oracle Berkeley DB Java Edition for a copy of the
 * license and additional information.
 */

package com.sleepycat.je;

import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_ACTIVE_LOG_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_AVAILABLE_LOG_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_CLEANED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_DEAD;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_MIGRATED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_BIN_DELTAS_OBSOLETE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_DELETIONS;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_DISK_READS;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_ENTRIES_READ;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_CLEANED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_DEAD;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_MIGRATED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_INS_OBSOLETE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNQUEUE_HITS;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_CLEANED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_DEAD;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_EXPIRED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_EXTINCT;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_LOCKED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_MARKED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_MIGRATED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_LNS_OBSOLETE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MAX_UTILIZATION;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_MIN_UTILIZATION;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_DBS_INCOMPLETE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_DBS_PROCESSED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_DB_QUEUE_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LNS_LOCKED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LNS_PROCESSED;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PENDING_LN_QUEUE_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PREDICTED_MAX_UTILIZATION;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PREDICTED_MIN_UTILIZATION;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_PROTECTED_LOG_SIZE_MAP;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_RESERVED_LOG_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_REVISAL_RUNS;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_RUNS;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TOTAL_LOG_SIZE;
import static com.sleepycat.je.cleaner.CleanerStatDefinition.CLEANER_TWO_PASS_RUNS;
import static com.sleepycat.je.dbi.BTreeStatDefinition.BT_OP_BIN_DELTA_DELETES;
import static com.sleepycat.je.dbi.BTreeStatDefinition.BT_OP_BIN_DELTA_GETS;
import static com.sleepycat.je.dbi.BTreeStatDefinition.BT_OP_BIN_DELTA_INSERTS;
import static com.sleepycat.je.dbi.BTreeStatDefinition.BT_OP_BIN_DELTA_UPDATES;
import static com.sleepycat.je.dbi.BTreeStatDefinition.BT_OP_RELATCHES_REQUIRED;
import static com.sleepycat.je.dbi.BTreeStatDefinition.BT_OP_ROOT_SPLITS;
import static com.sleepycat.je.dbi.DbiStatDefinition.BACKUP_COPY_FILES_COUNT;
import static com.sleepycat.je.dbi.DbiStatDefinition.BACKUP_COPY_FILES_MS;
import static com.sleepycat.je.dbi.DbiStatDefinition.ENV_CREATION_TIME;
import static com.sleepycat.je.dbi.DbiStatDefinition.MB_ADMIN_BYTES;
import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DATA_BYTES;
import static com.sleepycat.je.dbi.DbiStatDefinition.MB_DOS_BYTES;
import static com.sleepycat.je.dbi.DbiStatDefinition.MB_LOCK_BYTES;
import static com.sleepycat.je.dbi.DbiStatDefinition.MB_SHARED_CACHE_TOTAL_BYTES;
import static com.sleepycat.je.dbi.DbiStatDefinition.MB_TOTAL_BYTES;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_DELETE;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_DELETE_FAIL;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_INSERT;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_INSERT_FAIL;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_POSITION;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_SEARCH;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_SEARCH_FAIL;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_PRI_UPDATE;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_DELETE;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_INSERT;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_POSITION;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_SEARCH;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_SEARCH_FAIL;
import static com.sleepycat.je.dbi.DbiStatDefinition.THROUGHPUT_SEC_UPDATE;
import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_DELTA_BLIND_OPS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_DELTA_FETCH_MISS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH;
import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH_MISS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.BIN_FETCH_MISS_RATIO;
import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_BINS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_BIN_DELTAS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_COMPACT_KEY;
import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_NO_TARGET;
import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_IN_SPARSE_TARGET;
import static com.sleepycat.je.evictor.EvictorStatDefinition.CACHED_UPPER_INS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_DIRTY_NODES_EVICTED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_EVICTION_RUNS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_LNS_EVICTED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_EVICTED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_MOVED_TO_PRI2_LRU;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_MUTATED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_PUT_BACK;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_SKIPPED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_STRIPPED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_NODES_TARGETED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_ROOT_NODES_EVICTED;
import static com.sleepycat.je.evictor.EvictorStatDefinition.EVICTOR_SHARED_CACHE_ENVS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.FULL_BIN_MISS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.LN_FETCH;
import static com.sleepycat.je.evictor.EvictorStatDefinition.LN_FETCH_MISS;
import static com.sleepycat.je.evictor.EvictorStatDefinition.PRI1_LRU_SIZE;
import static com.sleepycat.je.evictor.EvictorStatDefinition.PRI2_LRU_SIZE;
import static com.sleepycat.je.evictor.EvictorStatDefinition.THREAD_UNAVAILABLE;
import static com.sleepycat.je.evictor.EvictorStatDefinition.UPPER_IN_FETCH;
import static com.sleepycat.je.evictor.EvictorStatDefinition.UPPER_IN_FETCH_MISS;
import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_CURSORS_BINS;
import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_DBCLOSED_BINS;
import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_NON_EMPTY_BINS;
import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_PROCESSED_BINS;
import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_QUEUE_SIZE;
import static com.sleepycat.je.incomp.INCompStatDefinition.INCOMP_SPLIT_BINS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_BYTES_READ_FROM_WRITEQUEUE;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FILE_OPENS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FSYNC_95_MS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FSYNC_99_MS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FSYNC_AVG_MS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_FSYNC_MAX_MS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_LOG_FSYNCS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_OPEN_FILES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_READS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_READ_BYTES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_WRITES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_RANDOM_WRITE_BYTES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_READS_FROM_WRITEQUEUE;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_READS;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_READ_BYTES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_WRITES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_SEQUENTIAL_WRITE_BYTES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES;
import static com.sleepycat.je.log.LogStatDefinition.FILEMGR_WRITES_FROM_WRITEQUEUE;
import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNCS;
import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_FSYNC_REQUESTS;
import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_N_GROUP_COMMIT_REQUESTS;
import static com.sleepycat.je.log.LogStatDefinition.FSYNCMGR_TIMEOUTS;
import static com.sleepycat.je.log.LogStatDefinition.LBFP_BUFFER_BYTES;
import static com.sleepycat.je.log.LogStatDefinition.LBFP_LOG_BUFFERS;
import static com.sleepycat.je.log.LogStatDefinition.LBFP_MISS;
import static com.sleepycat.je.log.LogStatDefinition.LBFP_NOT_RESIDENT;
import static com.sleepycat.je.log.LogStatDefinition.LBFP_NO_FREE_BUFFER;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_END_OF_LOG;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_REPEAT_FAULT_READS;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_REPEAT_ITERATOR_READS;
import static com.sleepycat.je.log.LogStatDefinition.LOGMGR_TEMP_BUFFER_WRITES;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_CHECKPOINTS;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_DELTA_IN_FLUSH;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_FULL_BIN_FLUSH;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_FULL_IN_FLUSH;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPTID;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_END;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_INTERVAL;
import static com.sleepycat.je.recovery.CheckpointStatDefinition.CKPT_LAST_CKPT_START;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_OWNERS;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_READ_LOCKS;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_REQUESTS;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_TOTAL;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITERS;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WAITS;
import static com.sleepycat.je.txn.LockStatDefinition.LOCK_WRITE_LOCKS;

import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;

import com.sleepycat.je.Durability.SyncPolicy;
import com.sleepycat.je.cleaner.CleanerStatDefinition;
import com.sleepycat.je.cleaner.EraserStatDefinition;
import com.sleepycat.je.dbi.BTreeStatDefinition;
import com.sleepycat.je.dbi.DbiStatDefinition;
import com.sleepycat.je.evictor.Evictor.EvictionSource;
import com.sleepycat.je.evictor.EvictorStatDefinition;
import com.sleepycat.je.evictor.OffHeapStatDefinition;
import com.sleepycat.je.incomp.INCompStatDefinition;
import com.sleepycat.je.log.LogStatDefinition;
import com.sleepycat.je.recovery.CheckpointStatDefinition;
import com.sleepycat.je.txn.LockStatDefinition;
import com.sleepycat.je.util.DbBackup;
import com.sleepycat.je.util.DbCacheSize;
import com.sleepycat.je.utilint.StatGroup;
import com.sleepycat.je.utilint.TaskCoordinator;

/**
 * Statistics for a single environment. Statistics provide indicators for
 * system monitoring and performance tuning.
 *
 * 

Each statistic has a name and a getter method in this class. For example, the {@code cacheTotalBytes} stat is returned by the {@link #getCacheTotalBytes()} method. Statistics are categorized into several groups; for example, {@code cacheTotalBytes} is in the {@code Cache} group. Each stat and group has a name and a description.

Viewing the statistics through {@link #toString()} shows the stat names and values organized by group. Viewing the stats with {@link #toStringVerbose()} additionally shows the description of each stat and group.
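For example, a minimal monitoring sketch that prints the stats of an already-open environment ({@code StatsDump} and {@code dump} are hypothetical names):

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;
    import com.sleepycat.je.StatsConfig;

    class StatsDump {
        // Print all stats by group; pass an already-open Environment.
        static void dump(Environment env) {
            StatsConfig config = new StatsConfig();
            config.setClear(false); // keep cumulative values; true resets them
            EnvironmentStats stats = env.getStats(config);
            System.out.println(stats);                   // names and values
            System.out.println(stats.toStringVerbose()); // plus descriptions
        }
    }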

Statistics are periodically output in CSV format to the je.stat.csv file (see {@link EnvironmentConfig#STATS_COLLECT}). The column header in the .csv file has {@code group:stat} format, where 'group' is the group name and 'stat' is the stat name. In Oracle NoSQL DB, in addition to the .csv file, JE stats are output in the .stat files.
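A configuration sketch for enabling CSV stats collection; the {@code STATS_COLLECT_INTERVAL} companion param is an assumption here, and the interval value is only an example:

    import com.sleepycat.je.EnvironmentConfig;

    class StatsCollectConfig {
        static EnvironmentConfig create() {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            // Enable periodic CSV stats output (je.stat.csv).
            envConfig.setConfigParam(EnvironmentConfig.STATS_COLLECT, "true");
            // Collection interval; param name assumed, value is an example.
            envConfig.setConfigParam(
                EnvironmentConfig.STATS_COLLECT_INTERVAL, "1 min");
            return envConfig;
        }
    }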

Stat values may also be obtained via JMX using the JEMonitor mbean. In Oracle NoSQL DB, JE stats are obtained via a different JMX interface in JSON format. The JSON format uses property names of the form {@code group_stat}, where 'group' is the group name and 'stat' is the stat name.

The stat groups are listed below. Each group name links to a summary of the statistics in the group.

• {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.log.LogStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.log.LogStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_DESC}
• {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_DESC}
• {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_DESC}
• {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_NAME}: {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_DESC}

The following sections describe each group of stats, along with some common strategies for using them for monitoring and performance tuning.

Cache Statistics

Group Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_DESC}

Group Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_DESC}

The JE cache consists of the main (in-heap) cache and an optional off-heap cache. The vast majority of the cache is occupied by Btree nodes, including internal nodes (INs) and leaf nodes (LNs). INs contain record keys, while each LN contains a single record's key and data.

Each IN refers to a configured maximum number of child nodes ({@link EnvironmentConfig#NODE_MAX_ENTRIES}). The INs form a Btree of at least 2 levels. With a large data set the Btree will normally have 4 or 5 levels. The top level is a single node, the root IN. Levels are numbered from the bottom up, starting with level 1 for bottom level INs (BINs). Levels are added at the top when the root IN splits.

When an off-heap cache is configured, it serves as an overflow for the main cache. See {@link EnvironmentConfig#MAX_OFF_HEAP_MEMORY}.

Cache Statistics: Sizing

Operation performance is often directly proportional to how much of the active data set is cached. BINs and LNs form the vast majority of the cache. Caching of BINs and LNs has different performance impacts, and behavior varies depending on whether an off-heap cache is configured and which {@link CacheMode} is used.

Main cache current usage is indicated by the following stats. Note that there is currently no stat for the number of LNs in the main cache.

• {@link #getCacheTotalBytes}: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_TOTAL_BYTES_DESC}
• {@link #getNCachedBINs}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BINS_DESC}
• {@link #getNCachedBINDeltas}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BIN_DELTAS_DESC}
• {@link #getNCachedUpperINs}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_UPPER_INS_DESC}

Off-heap cache current usage is indicated by:

• {@link #getOffHeapTotalBytes}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BYTES_NAME}
• {@link #getOffHeapCachedLNs}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_LNS_DESC}
• {@link #getOffHeapCachedBINs}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BINS_DESC}
• {@link #getOffHeapCachedBINDeltas}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BIN_DELTAS_DESC}

A cache miss is considered a miss only when the object is not found in either cache. Misses often result in file I/O and are a primary indicator of cache performance. Fetches (access requests) and misses are indicated by:

• {@link #getNLNsFetch}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_DESC}
• {@link #getNLNsFetchMiss}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_MISS_DESC}
• {@link #getNBINsFetch}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_DESC}
• {@link #getNBINsFetchMiss}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_DESC}
• {@link #getNBINDeltasFetchMiss}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_FETCH_MISS_DESC}
• {@link #getNFullBINsMiss}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#FULL_BIN_MISS_DESC}
• {@link #getNUpperINsFetch}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_DESC}
• {@link #getNUpperINsFetchMiss}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_MISS_DESC}

When the number of LN misses ({@code nLNsFetchMiss}) or the number of BIN misses ({@code nBINsFetchMiss + nFullBINsMiss}) is significant, the JE cache may be undersized, as discussed below. But note that it is not practical to correlate the number of fetches and misses directly to application operations, because LNs are sometimes {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}, BINs are sometimes accessed multiple times per operation, and internal Btree accesses are included in the stat values.

Ideally, all BINs and LNs for the active data set should fit in cache so that operations do not result in fetch misses, which often perform random read I/O. When this is not practical, which is often the case for large data sets, the next best thing is to ensure that all BINs fit in cache, so that an operation will perform at most one random read I/O to fetch the LN. The {@link DbCacheSize} javadoc describes how to size the cache to ensure that all BINs and/or LNs fit in cache.
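As a rough illustration only, a sketch that derives miss ratios from a stats snapshot ({@code MissRatios} is a hypothetical helper; per the caveat above, these ratios cannot be mapped directly to application operations):

    import com.sleepycat.je.EnvironmentStats;

    class MissRatios {
        static void report(EnvironmentStats stats) {
            long lnFetch = stats.getNLNsFetch();
            long lnMiss = stats.getNLNsFetchMiss();
            long binFetch = stats.getNBINsFetch();
            // BIN misses are nBINsFetchMiss + nFullBINsMiss, per the text above.
            long binMiss = stats.getNBINsFetchMiss() + stats.getNFullBINsMiss();
            if (lnFetch > 0) {
                System.out.printf("LN miss ratio: %.3f%n",
                                  (double) lnMiss / lnFetch);
            }
            if (binFetch > 0) {
                System.out.printf("BIN miss ratio: %.3f%n",
                                  (double) binMiss / binFetch);
            }
        }
    }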

Normally {@link EnvironmentConfig#MAX_MEMORY_PERCENT} determines the JE cache size as a value relative to the JVM heap size, i.e., the heap size determines the cache size.

For configuring cache size and behavior, see the following (a configuration sketch follows this list):

• {@link EnvironmentConfig#MAX_MEMORY_PERCENT}
• {@link EnvironmentConfig#MAX_MEMORY}
• {@link EnvironmentConfig#MAX_OFF_HEAP_MEMORY}
• {@link EnvironmentConfig#setCacheMode(CacheMode)}
• {@link CacheMode}
• {@link DbCacheSize}
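A cache-sizing sketch under assumed requirements; all sizes and percentages below are example values, not recommendations, and the {@code setOffHeapCacheSize} mutator is assumed to correspond to {@code MAX_OFF_HEAP_MEMORY}:

    import com.sleepycat.je.CacheMode;
    import com.sleepycat.je.EnvironmentConfig;

    class CacheConfig {
        static EnvironmentConfig create() {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            // Main cache as a percent of the JVM heap (equivalent to
            // MAX_MEMORY_PERCENT); an absolute setCacheSize overrides this.
            envConfig.setCachePercent(60);
            // Optional off-heap cache (assumed equivalent of MAX_OFF_HEAP_MEMORY).
            envConfig.setOffHeapCacheSize(8L * 1024 * 1024 * 1024);
            // Evict LNs eagerly to reduce GC pressure, per the text above.
            envConfig.setCacheMode(CacheMode.EVICT_LN);
            return envConfig;
        }
    }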

When using Oracle NoSQL DB, a sizing exercise and {@link DbCacheSize} are used to determine the cache size needed to hold all BINs in memory. The memory available to each node is divided between a 32 GB heap for the JVM process (so that CompressedOops may be used) and the off-heap cache (when more than 32 GB of memory is available).

It is also important not to configure the cache size too large relative to the JVM heap size. If there is not enough free space in the heap, Java GC pauses may become a problem. Increasing the default value for {@code MAX_MEMORY_PERCENT}, or setting {@code MAX_MEMORY} (which overrides {@code MAX_MEMORY_PERCENT}), should be done carefully.

Java GC performance may also be improved by using {@link CacheMode#EVICT_LN}. Record data sizes should also be kept below 1 MB to avoid "humongous objects" (see Java GC documentation).

When using Oracle NoSQL DB, by default, {@code MAX_MEMORY_PERCENT} is set to 70% and {@link CacheMode#EVICT_LN} is used. The LOB (large object) API is implemented using multiple JE records per LOB, where the data size of each record is 1 MB or less.

When a shared cache is configured, the main and off-heap cache may be shared by multiple JE Environments in a single JVM process (a sketch follows this list). See:

• {@link EnvironmentConfig#SHARED_CACHE}
• {@link #getSharedCacheTotalBytes()}
• {@link #getNSharedCacheEnvironments()}
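A sketch of two environments in one process sharing a single cache (directory paths are placeholders):

    import java.io.File;
    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentConfig;
    import com.sleepycat.je.EnvironmentStats;

    class SharedCacheExample {
        static void open() {
            EnvironmentConfig envConfig = new EnvironmentConfig();
            envConfig.setAllowCreate(true);
            envConfig.setSharedCache(true);
            Environment env1 = new Environment(new File("/path/env1"), envConfig);
            Environment env2 = new Environment(new File("/path/env2"), envConfig);
            EnvironmentStats stats = env1.getStats(null); // null = default config
            System.out.println(stats.getNSharedCacheEnvironments()); // expect 2
            System.out.println(stats.getSharedCacheTotalBytes());
            env2.close();
            env1.close();
        }
    }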

When using Oracle NoSQL DB, the JE shared cache feature is not used because each node only uses a single JE Environment.

Cache Statistics: Size Optimizations

Since a large portion of an IN consists of record keys, JE uses {@link DatabaseConfig#setKeyPrefixing(boolean) key prefix compression}. Ideally, key suffixes are small enough to be stored using the {@link EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH compact key format}. The following stat indicates the number of INs using this compact format:

• {@link #getNINCompactKeyIN}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_COMPACT_KEY_DESC}

Configuration params impacting key prefixing and the compact key format are:

• {@link DatabaseConfig#setKeyPrefixing(boolean)}
• {@link EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH}

Enabling key prefixing for all databases is strongly recommended. When using Oracle NoSQL DB, key prefixing is always enabled.

Another configuration param impacting BIN cache size is {@code TREE_MAX_EMBEDDED_LN}. There is currently no stat indicating the number of embedded LNs. See:

• {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN}

Cache Statistics: Unexpected Sizes

Although the Btree normally occupies the vast majority of the cache, it is possible that record locks occupy unexpected amounts of cache when large transactions are used, or when cursors or transactions are left open due to application bugs. The following stat indicates the amount of cache used by record locks:

• {@link #getLockBytes()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_LOCK_BYTES_DESC}

To reduce the amount of memory used for record locks (a read sketch follows this list):

• Use a small number of write operations per transaction. Write locks are held until the end of a transaction.
• For transactions using Serializable isolation or RepeatableRead isolation (the default), use a small number of read operations per transaction.
• To read large numbers of records, use {@link LockMode#READ_COMMITTED} isolation or use a null Transaction (which implies ReadCommitted). With ReadCommitted isolation, locks are released after each read operation. Using {@link LockMode#READ_UNCOMMITTED} will also avoid record locks, but does not provide any transactional guarantees.
• Ensure that all cursors and transactions are closed promptly.
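For the ReadCommitted guideline above, a scan sketch that avoids accumulating read locks ({@code countRecords} is a hypothetical helper):

    import com.sleepycat.je.Cursor;
    import com.sleepycat.je.CursorConfig;
    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.LockMode;
    import com.sleepycat.je.OperationStatus;

    class ReadCommittedScan {
        static long countRecords(Database db) {
            long count = 0;
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            // Null txn plus READ_COMMITTED: each lock is released after the read.
            try (Cursor cursor = db.openCursor(null, CursorConfig.READ_COMMITTED)) {
                while (cursor.getNext(key, data, LockMode.DEFAULT)
                        == OperationStatus.SUCCESS) {
                    count++;
                }
            }
            return count;
        }
    }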

Note that the above guidelines are also important for reducing contention when records are accessed concurrently from multiple threads and transactions. When using Oracle NoSQL DB, the application should avoid performing a large number of write operations in a single request. For read operations, NoSQL DB uses ReadCommitted isolation to avoid accumulation of locks.

Another unexpected use of cache is possible when using a {@link DiskOrderedCursor} or when calling {@link Database#count()}. The amount of cache used by these operations is indicated by:

• {@link #getDOSBytes}: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DOS_BYTES_DESC}

{@code DiskOrderedCursor} and {@code Database.count} should normally be explicitly constrained to use a maximum amount of cache memory (a sketch follows this list). See:

• {@link DiskOrderedCursorConfig#setInternalMemoryLimit(long)}
• {@link Database#count(long)}
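A sketch constraining a disk-ordered scan's memory; the 100 MB limit is an example value:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.DiskOrderedCursor;
    import com.sleepycat.je.DiskOrderedCursorConfig;
    import com.sleepycat.je.OperationStatus;

    class DiskOrderedScan {
        static void scan(Database db) {
            DiskOrderedCursorConfig config = new DiskOrderedCursorConfig();
            config.setInternalMemoryLimit(100L * 1024 * 1024); // example cap
            DatabaseEntry key = new DatabaseEntry();
            DatabaseEntry data = new DatabaseEntry();
            try (DiskOrderedCursor cursor = db.openCursor(config)) {
                // Lock mode is not used by disk-ordered scans; null is allowed.
                while (cursor.getNext(key, data, null)
                        == OperationStatus.SUCCESS) {
                    // process the record
                }
            }
        }
    }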

Oracle NoSQL DB does not currently use {@code DiskOrderedCursor} or {@code Database.count}.

Cache Statistics: Eviction

Eviction is removal of Btree nodes from the cache in order to make room for newly added nodes. See {@link CacheMode} for a description of eviction.

Normally eviction is performed via background threads in the eviction thread pools. Disabling the eviction pool threads is not recommended.

• {@link EnvironmentConfig#ENV_RUN_EVICTOR}
• {@link EnvironmentConfig#ENV_RUN_OFFHEAP_EVICTOR}

Eviction stats are important indicators of cache efficiency and provide a deeper understanding of cache behavior. Main cache eviction is indicated by:

• {@link #getNLNsEvicted}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_LNS_EVICTED_DESC}
• {@link #getNNodesMutated}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MUTATED_DESC}
• {@link #getNNodesEvicted}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_EVICTED_DESC}
• {@link #getNDirtyNodesEvicted}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_DIRTY_NODES_EVICTED_DESC}

Note that objects evicted from the main cache are moved to the off-heap cache whenever possible.

Off-heap cache eviction is indicated by:

• {@link #getOffHeapLNsEvicted}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_EVICTED_DESC}
• {@link #getOffHeapNodesMutated}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_MUTATED_DESC}
• {@link #getOffHeapNodesEvicted}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_EVICTED_DESC}
• {@link #getOffHeapDirtyNodesEvicted}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#DIRTY_NODES_EVICTED_DESC}

When analyzing Java GC performance, the most relevant stats are {@code NLNsEvicted}, {@code NNodesMutated} and {@code NNodesEvicted}, which all indicate eviction from the main cache based on LRU. Large values for these stats indicate that many old generation Java objects are being GC'd, which is often a cause of GC pauses.

Note that when {@link CacheMode#EVICT_LN} is used or when LNs are {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}, {@code NLNsEvicted} will be close to zero because LNs are not evicted based on LRU. And if an off-heap cache is configured, {@code NNodesMutated} will be close to zero because BIN mutation takes place in the off-heap cache. If any of the three values are large, this points to a potential GC performance problem. The GC logs should be consulted to confirm this.

Large values for {@code NDirtyNodesEvicted} or {@code OffHeapDirtyNodesEvicted} indicate that the cache is severely undersized, creating a risk of using all available disk space as well as severe performance problems. Dirty nodes are evicted last (after evicting all non-dirty nodes) because they must be written to disk. This causes excessive writing, and JE log cleaning may be unproductive.

Note that when an off-heap cache is configured, {@code NDirtyNodesEvicted} will be zero because dirty nodes in the main cache are moved to the off-heap cache if they don't fit in the main cache, and are evicted completely and written to disk only when they don't fit in the off-heap cache.

Another type of eviction tuning for the main cache involves changing the number of bytes evicted each time an evictor thread is awoken:

• {@link EnvironmentConfig#EVICTOR_EVICT_BYTES}

If the number of bytes is too large, it may cause a noticeable spike in eviction activity, reducing resources available to other threads. If the number of bytes is too small, the overhead of waking the evictor threads more often may be noticeable. The default value for this parameter is generally a good compromise. This parameter also impacts critical eviction, which is described next.

Note that the corresponding parameter for the off-heap cache, {@link EnvironmentConfig#OFFHEAP_EVICT_BYTES}, works differently and is described in the next section.

Cache Statistics: Critical Eviction

The following stats indicate that critical eviction is occurring:

• {@link #getNBytesEvictedCritical}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CRITICAL_DESC}
• {@link #getNBytesEvictedCacheMode}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CACHEMODE_DESC}
• {@link #getNBytesEvictedDeamon}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_DAEMON_DESC}
• {@link #getNBytesEvictedEvictorThread}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_EVICTORTHREAD_DESC}
• {@link #getNBytesEvictedManual}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_MANUAL_DESC}
• {@link #getOffHeapCriticalNodesTargeted}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CRITICAL_NODES_TARGETED_DESC}
• {@link #getOffHeapNodesTargeted}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_TARGETED_DESC}

Eviction is performed by eviction pool threads, by calls to {@link Environment#evictMemory()} in application background threads, or via {@link CacheMode#EVICT_LN} or {@link CacheMode#EVICT_BIN}. If these mechanisms are not sufficient to evict memory from cache as quickly as CRUD operations are adding memory to cache, then critical eviction comes into play. Critical eviction is performed in-line in the thread performing the CRUD operation, which is very undesirable since it increases operation latency.

Critical eviction in the main cache is indicated by large values for {@code NBytesEvictedCritical}, as compared to the other {@code NBytesEvictedXXX} stats. Critical eviction in the off-heap cache is indicated by large values for {@code OffHeapCriticalNodesTargeted} compared to {@code OffHeapNodesTargeted}.
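A monitoring sketch for the main-cache comparison; the "dominates" threshold is a judgment call, not a JE-defined rule (getter names are those documented in the table above):

    import com.sleepycat.je.EnvironmentStats;

    class CriticalEvictionCheck {
        static boolean mainCacheCriticalIsHigh(EnvironmentStats stats) {
            long critical = stats.getNBytesEvictedCritical();
            long other = stats.getNBytesEvictedEvictorThread()
                    + stats.getNBytesEvictedCacheMode()
                    + stats.getNBytesEvictedDeamon()
                    + stats.getNBytesEvictedManual();
            // Flag when critical eviction dominates all other sources combined.
            return critical > other;
        }
    }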

Additional stats indicating that background eviction threads may be insufficient are:

• {@link #getNThreadUnavailable}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#THREAD_UNAVAILABLE_DESC}
• {@link #getOffHeapThreadUnavailable}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#THREAD_UNAVAILABLE_DESC}

Critical eviction can sometimes be reduced by changing {@link EnvironmentConfig#EVICTOR_CRITICAL_PERCENTAGE} or modifying the eviction thread pool parameters.

• {@link EnvironmentConfig#EVICTOR_CRITICAL_PERCENTAGE}
• {@link EnvironmentConfig#EVICTOR_CORE_THREADS}
• {@link EnvironmentConfig#EVICTOR_MAX_THREADS}
• {@link EnvironmentConfig#EVICTOR_KEEP_ALIVE}
• {@link EnvironmentConfig#OFFHEAP_CORE_THREADS}
• {@link EnvironmentConfig#OFFHEAP_MAX_THREADS}
• {@link EnvironmentConfig#OFFHEAP_KEEP_ALIVE}

When using Oracle NoSQL DB, {@code EVICTOR_CRITICAL_PERCENTAGE} is set to 20% rather than using the JE default of 0%.

In the main cache, critical eviction uses the same parameter as background eviction for determining how many bytes to evict at one time:

• {@link EnvironmentConfig#EVICTOR_EVICT_BYTES}

Be careful when increasing this value, since this will cause longer operation latencies when critical eviction is occurring in the main cache.

The corresponding parameter for the off-heap cache, {@code OFFHEAP_EVICT_BYTES}, works differently:

• {@link EnvironmentConfig#OFFHEAP_EVICT_BYTES}

Unlike in the main cache, {@code OFFHEAP_EVICT_BYTES} defines the goal for background eviction to be below {@code MAX_OFF_HEAP_MEMORY}. The background evictor threads for the off-heap cache attempt to maintain the size of the off-heap cache at {@code MAX_OFF_HEAP_MEMORY - OFFHEAP_EVICT_BYTES}. If the off-heap cache size grows larger than {@code MAX_OFF_HEAP_MEMORY}, critical off-heap eviction will occur. The default value for {@code OFFHEAP_EVICT_BYTES} is fairly large to ensure that critical eviction does not occur. Be careful when lowering this value.

This approach is intended to prevent the off-heap cache from exceeding its maximum size. If the maximum is exceeded, there is a danger that the JVM process will be killed by the OS. See {@link #getOffHeapAllocFailures()}.

Cache Statistics: LRU List Contention

Another common tuning issue involves thread contention on the cache LRU lists, although there is no stat to indicate such contention. Since each time a node is accessed it must be moved to the end of the LRU list, a single LRU list would cause contention among threads performing CRUD operations. By default there are 4 LRU lists for each cache. If contention is noticeable on internal Evictor.LRUList or OffHeapCache.LRUList methods, consider increasing the number of LRU lists:

• {@link EnvironmentConfig#EVICTOR_N_LRU_LISTS}
• {@link EnvironmentConfig#OFFHEAP_N_LRU_LISTS}

However, note that increasing the number of LRU lists will decrease the accuracy of the LRU.

Cache Statistics: Debugging

The following cache stats are unlikely to be needed for monitoring or tuning, but are sometimes useful for debugging and testing.

• {@link #getDataBytes}: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_BYTES_DESC}
• {@link #getAdminBytes}: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_ADMIN_BYTES_DESC}
• {@link #getNNodesTargeted}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_TARGETED_DESC}
• {@link #getNNodesStripped}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_STRIPPED_DESC}
• {@link #getNNodesPutBack}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_PUT_BACK_DESC}
• {@link #getNNodesMovedToDirtyLRU}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MOVED_TO_PRI2_LRU_DESC}
• {@link #getNNodesSkipped}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_SKIPPED_DESC}
• {@link #getNRootNodesEvicted}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_ROOT_NODES_EVICTED_DESC}
• {@link #getNBINsFetchMissRatio}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_RATIO_DESC}
• {@link #getNINSparseTarget}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_SPARSE_TARGET_DESC}
• {@link #getNINNoTarget}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_NO_TARGET_DESC}
• {@link #getMixedLRUSize}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#PRI1_LRU_SIZE_DESC}
• {@link #getDirtyLRUSize}: {@value com.sleepycat.je.evictor.EvictorStatDefinition#PRI2_LRU_SIZE_DESC}
• {@link #getOffHeapAllocFailures}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_FAILURE_DESC}
• {@link #getOffHeapAllocOverflows}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_OVERFLOW_DESC}
• {@link #getOffHeapNodesStripped}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_STRIPPED_DESC}
• {@link #getOffHeapNodesSkipped}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_SKIPPED_DESC}
• {@link #getOffHeapLNsLoaded}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_LOADED_DESC}
• {@link #getOffHeapLNsStored}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_STORED_DESC}
• {@link #getOffHeapBINsLoaded}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_LOADED_DESC}
• {@link #getOffHeapBINsStored}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_STORED_DESC}
• {@link #getOffHeapTotalBlocks}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BLOCKS_DESC}
• {@link #getOffHeapLRUSize}: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LRU_SIZE_DESC}

Likewise, the following cache configuration params are unlikely to be needed for tuning, but are sometimes useful for debugging and testing.

• {@link EnvironmentConfig#ENV_DB_EVICTION}
• {@link EnvironmentConfig#TREE_MIN_MEMORY}
• {@link EnvironmentConfig#EVICTOR_FORCED_YIELD}
• {@link EnvironmentConfig#EVICTOR_ALLOW_BIN_DELTAS}
• {@link EnvironmentConfig#OFFHEAP_CHECKSUM}

Cleaning Statistics

Group Name: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_DESC}

The JE cleaner is responsible for "disk garbage collection" within JE's log structured (append only) storage system. Data files (.jdb files), which are also called log files, are cleaned and deleted as their contents become obsolete. See this introduction to JE data files.

Cleaning Statistics: Utilization

By utilization we mean the ratio of utilized size to the total size of the active data files. The cleaner is run when overall utilization (for all active files) drops below the target utilization, which is specified by {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION}. The cleaner attempts to maintain overall utilization at the target level. In addition, a file will be cleaned if its individual utilization drops below {@link EnvironmentConfig#CLEANER_MIN_FILE_UTILIZATION}, irrespective of overall utilization.

Current (actual) utilization is indicated by the following stats (a monitoring sketch follows):

• {@link #getCurrentMinUtilization}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MIN_UTILIZATION_DESC}
• {@link #getCurrentMaxUtilization}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MAX_UTILIZATION_DESC}
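A sketch comparing current utilization to a configured target; the five-point margin mirrors the guideline described below, and {@code UtilizationCheck} is a hypothetical helper:

    import com.sleepycat.je.Environment;
    import com.sleepycat.je.EnvironmentStats;

    class UtilizationCheck {
        static void check(Environment env, int targetPercent) {
            EnvironmentStats stats = env.getStats(null);
            // When min and max differ (TTL in use), use the maximum.
            int util = stats.getCurrentMaxUtilization();
            if (util < targetPercent - 5) {
                System.out.println("Cleaner may not be keeping up: "
                        + util + "% vs target " + targetPercent + "%");
            }
        }
    }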

If TTL is not used, the minimum and maximum utilization will be the same. If TTL is used, the minimum and maximum define a range that bounds the actual utilization. The current utilization is not known precisely when TTL is used because of the potential overlap between expired data and data that has become obsolete due to updates or deletions. See Cleaning Statistics: TTL and expired data.

If the cleaner is successfully maintaining the target utilization, the current utilization (indicated by the above stats) is normally slightly lower than the target utilization. This is because the cleaner is not activated until current utilization drops below the target utilization, and it takes time for the cleaner to free space and raise the current utilization. If the current utilization is significantly lower than the target utilization (e.g., more than five percentage points lower), this typically means the cleaner is unable to maintain the target utilization. (When the minimum and maximum utilization stats are unequal, we recommend using the maximum utilization for this determination.)

When the cleaner is unable to maintain the target utilization, it will clean files continuously in an attempt to reach the target. This will use significant system resources in the best case and will use all available disk space in the worst case, so the source of the problem should be identified and corrected using the guidelines below.

• One possibility is that the cleaner is unable to keep up simply because there are many more application threads generating waste than there are cleaner threads. To rule this out, try increasing the number of {@link EnvironmentConfig#CLEANER_THREADS cleaner threads}. For example, the NoSQL DB product uses two cleaner threads.
• The cleaner may be able to keep up with generated waste, but due to cleaning efficiency factors (explained in the next section) it may not be able to maintain the configured target utilization, or it may be consuming large amounts of resources in order to do so. In this case, configuring a lower {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION target utilization} is one solution. For example, the NoSQL DB product uses a target utilization of 40%. See the next section for additional guidelines.
• In extreme cases, cleaning efficiency factors make it impossible for the cleaner to make forward progress, meaning that more obsolete space is generated by cleaning than can be reclaimed. This will eventually result in using all available disk space. To avoid this, follow the guidelines above and in the next section.

Cleaning Efficiency

The general guidelines for ensuring that the cleaner can maintain the target utilization are:

1. ensure that the JE cache is sized appropriately, and
2. avoid large record keys, especially when the data size is small yet too large to be embedded in the BIN (as discussed below).

The remainder of this section is intended to help understand the reasons for these recommendations and to aid in advanced tuning.

A JE data file consists mainly of Btree nodes, which include internal nodes (INs) and leaf nodes (LNs). Each IN contains the keys of roughly {@link EnvironmentConfig#NODE_MAX_ENTRIES 100 records}, while an LN contains the key and data of a single record. When the cleaner processes a data file it migrates (copies) active LNs to the end of the log, and dirties their parent BINs (bottom internal nodes). Active INs are dirtied but not immediately copied. The next checkpoint will then write the INs dirtied as a result of cleaning the file:

• The BIN parents of the active LNs from the cleaned file.
• The active INs from the cleaned file, and their parent INs.

Finally, now that the persistent form of the Btree contains no references to the cleaned file, the file can be deleted. (In HA environments the file is not deleted immediately, as will be discussed.)

When LNs are migrated, logging of their dirtied parent BINs causes the previous version of these BINs to become obsolete. In many cases the previous version may be in a different file than the cleaned file. So although the cleaner reclaims the space for the obsolete data in the cleaned file, it also creates some amount of additional obsolete space.

The ratio of reclaimed space to additional obsolete space determines the maximum utilization that can result from cleaning. If this maximum is less than the target utilization, the cleaner will run continuously in an attempt to reach the target and will consume large amounts of system resources. Several factors influence how much additional obsolete space is created:

• The lower the utilization of the file selected for cleaning, the fewer active LNs are migrated. This means fewer parent BINs are dirtied and logged, and therefore less obsolete space is created. For this reason, specifying a lower target utilization will cause cleaning to be less expensive. Also, this is why JE always selects files with the lowest utilization for cleaning. Some application workloads vary over time and create a mix of high and low utilization files, while others are consistent over time and all files have the same utilization; cleaning will be more efficient for workloads of the first type.
• A special case is when a record's data is stored (embedded) in the BIN. This is referred to as an embedded LN. Embedded LNs are not migrated by the cleaner (they are no longer needed after transaction processing), so embedded LNs do not cause the creation of additional obsolete space during cleaning. LNs are embedded in two situations:
    1. All LNs are embedded in a {@link DatabaseConfig#setSortedDuplicates DB with duplicate keys}. Such databases are normally {@link SecondaryDatabase}s.
    2. In a DB where duplicate keys are not allowed (which is the default for a {@link Database}), LNs are embedded when the data size is no larger than {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN}. Such databases are normally primary databases, but in rare cases can be {@link SecondaryDatabase}s.
• When non-embedded LNs have a relatively large data size, fewer LNs per file are migrated and therefore less obsolete space is created. On the other hand, when the data size is small, yet too large to be {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}, significant amounts of obsolete space may be created by cleaning. This is because many LNs are migrated per file, and for each of these LNs a BIN is dirtied. {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN} can be increased to solve this problem in some cases, but (as described in its javadoc) increasing this value will increase BIN cache usage and should be done with caution.
• When LNs have a relatively large key size, their parent BINs are also larger. When these LNs are not embedded, the larger BIN size means that more obsolete space is created by cleaning. Even when the LNs are embedded, normal write operations will create more obsolete space and BIN cache usage will be increased.
• For the reasons stated above, a worst case for creation of obsolete space during cleaning is when LNs have large keys and small data, yet not small enough to be embedded.
• The larger the {@link EnvironmentConfig#CHECKPOINTER_BYTES_INTERVAL checkpoint interval}, the more likely it is that migration of two or more LNs will dirty a single parent BIN (assuming the absence of cache eviction). This causes fewer BINs to be logged as a result of migration, so less obsolete space is created. In other words, increasing the checkpoint interval increases write absorption. This is true for ordinary record write operations as well as LN migration.

Even when cleaning does not create significant amounts of additional obsolete space, an undersized cache can still prevent the cleaner from maintaining the target utilization when eviction of dirty BINs occurs. When eviction causes logging of dirty BINs, this reduces or even cancels out the write absorption benefits that normally occur due to periodic checkpoints. In the worst case, every record write operation causes a BIN to be written as well, which means that large amounts of obsolete data will be created at a high rate. The {@link #getNDirtyNodesEvicted()} and {@link #getOffHeapDirtyNodesEvicted()} cache statistics can help to identify this problem.

Even when cleaning is maintaining the target utilization, it may consume large amounts of system resources in order to do so. The following indicators of cleaning activity can be used to get a rough idea of the level of cleaning activity.

• {@link #getNCleanerRuns}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RUNS_DESC}
• {@link #getNCleanerEntriesRead}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ENTRIES_READ_DESC}

As mentioned earlier, configuring a lower {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION target utilization} is one way to reduce cleaner resource consumption.

The read IO caused by cleaning is indicated by the following stat:

• {@link #getNCleanerDiskRead}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DISK_READS_DESC}

The impact of cleaner read IO can sometimes be reduced by increasing the {@link EnvironmentConfig#CLEANER_READ_SIZE read buffer size}.

The write IO caused by cleaning is due to {@link #getNLNsMigrated() active LN migration} and to logging of INs that were dirtied by the cleaner. Both of these costs can be reduced by decreasing the {@link EnvironmentConfig#CLEANER_MIN_UTILIZATION target utilization}. Logging of dirty INs can also be reduced by using smaller key sizes, especially when the data size is small, yet too large to be embedded in the BIN.

When a workload involves inserting and deleting large numbers of records, another way of increasing cleaner efficiency is to remove the records using {@link WriteOptions#setTTL(int) TTL} or {@link ExtinctionFilter record extinction}, rather than performing transactional record deletions. When records have expired or become extinct, the cleaner can discard the LNs without a Btree lookup, as described in the next section. Also, because there are no transactional deletions there is less cleaner metadata and less writing overall.
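For example, a write sketch using TTL so that such records later become known-obsolete; the 30-day TTL is an example value and {@code TtlWrite} is a hypothetical helper:

    import com.sleepycat.je.Database;
    import com.sleepycat.je.DatabaseEntry;
    import com.sleepycat.je.Put;
    import com.sleepycat.je.WriteOptions;

    class TtlWrite {
        static void put(Database db, byte[] keyBytes, byte[] dataBytes) {
            DatabaseEntry key = new DatabaseEntry(keyBytes);
            DatabaseEntry data = new DatabaseEntry(dataBytes);
            // TTL is expressed in days unless another unit is specified.
            WriteOptions options = new WriteOptions().setTTL(30);
            db.put(null, key, data, Put.OVERWRITE, options);
        }
    }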

Cleaning Statistics: Processing Details

This section describes details of cleaner file processing. The stats in this section are useful for internal analysis and debugging.

When the cleaner processes a data file, it reads the Btree entries: LNs, BIN-deltas, BINs and upper INs. The number of entries processed is {@link #getNCleanerEntriesRead()}. (There are a small number of additional non-Btree entries in each file that are always obsolete and are completely ignored by the cleaner.)

The first step of processing a Btree entry is to determine if it is known-obsolete. A known-obsolete entry is one of the following:

• A Btree entry that was recorded as obsolete in the cleaner's per-file metadata during transaction processing.
• A Btree entry that belongs to a Database that has been {@link Environment#removeDatabase removed} or {@link Environment#truncateDatabase truncated}. Note that DBs are added to a pending DB queue if the removal or truncation is not yet complete; this is discussed in the next section.
• An LN entry representing a record deletion in the transaction log.
• An LN that has {@link #getNLNsExpired() expired}. Expired LNs result from the use of {@link WriteOptions#setTTL(int) TTL}.
• An LN that has become {@link #getNLNsExtinct() extinct}. Extinct LNs result from using an {@link ExtinctionFilter} along with the {@link Environment#discardExtinctRecords discardExtinctRecords} method.

Known-obsolete entries are very inexpensive to process because no Btree lookup is required to determine that they are obsolete, and they can simply be discarded. The number of known-obsolete entries is the sum of the following {@code XxxObsolete} stats:

• {@link #getNLNsObsolete}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_OBSOLETE_DESC}
• {@link #getNINsObsolete}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_OBSOLETE_DESC}
• {@link #getNBINDeltasObsolete}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_OBSOLETE_DESC}

Entries that are not known-obsolete must be processed by performing a Btree lookup to determine whether they're active or obsolete. These are indicated by the following {@code XxxCleaned} stats:

• {@link #getNLNsCleaned}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_CLEANED_DESC}
• {@link #getNINsCleaned}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_CLEANED_DESC}
• {@link #getNBINDeltasCleaned}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_CLEANED_DESC}

The sum of the {@code XxxObsolete} and {@code XxxCleaned} stats is the {@link #getNCleanerEntriesRead() total processed}:

    CleanerEntriesRead =
        (LNsObsolete + INsObsolete + BINDeltasObsolete) +
        (LNsCleaned + INsCleaned + BINDeltasCleaned)

The numbers of expired and extinct LNs are broken out as separate stats. These are a subset of the known-obsolete LNs:

• {@link #getNLNsExpired}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXPIRED_DESC}
• {@link #getNLNsExtinct}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXTINCT_DESC}

If the Btree lookup does not find the entry, then it is actually obsolete. This can happen for two reasons:

• The obsolete entry was not recorded as obsolete in the cleaner metadata during transaction processing. The recording of this metadata is not always guaranteed.
• The entry became obsolete during processing of the file. The cleaner loads its metadata when file processing starts, and this metadata is not updated during file processing.

Such entries are indicated by the {@code XxxDead} stats:

• {@link #getNLNsDead}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_DEAD_DESC}
• {@link #getNINsDead}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_DEAD_DESC}
• {@link #getNBINDeltasDead}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_DEAD_DESC}

If the entry is active in the Btree, it must be preserved by the cleaner. Such entries are indicated by the following stats:

• {@link #getNLNsMigrated}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MIGRATED_DESC}
• {@link #getNLNsMarked}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MARKED_DESC}
• {@link #getNLNsLocked}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_LOCKED_DESC}
• {@link #getNINsMigrated}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_MIGRATED_DESC}
• {@link #getNBINDeltasMigrated}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_MIGRATED_DESC}

The stats above have the following meanings:

  • {@link #getNLNsMigrated() Migrated LNs} are logged when they are processed by the cleaner.

  • {@link #getNLNsMarked() Marked LNs} are active LNs in {@link DatabaseConfig#setTemporary temporary DBs} that are marked dirty. They will be logged only if they are evicted from cache.

  • {@link #getNLNsLocked() Locked LNs} cannot be processed immediately and are added to a pending queue. Pending LNs are discussed in the next section.

  • {@link #getNINsMigrated() Migrated INs} are simply marked dirty, and they will be logged by the next checkpoint.

  • {@link #getNBINDeltasMigrated() Migrated BIN-deltas} are also simply marked dirty.

The stats above provide a breakdown of cleaned entries as follows:

  • {@code LNsCleaned = LNsDead + LNsMigrated + LNsMarked + LNsLocked}

  • {@code INsCleaned = INsDead + INsMigrated}

  • {@code BINDeltasCleaned = BINDeltasDead + BINDeltasMigrated}

When LNs are processed, a queue is used to reduce Btree lookups. LNs are added to the queue when cleaning is needed (they are not known-obsolete). When the queue fills, the oldest LN in the queue is processed. If the LN is found in the Btree, the other LNs in the queue are checked to see if they have the same parent BIN. If so, these LNs can be processed while the BIN is latched, without an additional Btree lookup. The number of such LNs is indicated by the following stat:

  • {@link #getNLNQueueHits}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNQUEUE_HITS_DESC}

The LN queue is most beneficial when LNs are inserted or updated in key order. The maximum size of the queue, expressed as its maximum memory size, can be changed via the {@link EnvironmentConfig#CLEANER_LOOK_AHEAD_CACHE_SIZE} parameter.
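
For example, the look-ahead cache could be enlarged when the environment is opened. This is only a sketch; the size value and the {@code envHome} directory are illustrative assumptions:

    EnvironmentConfig config = new EnvironmentConfig();
    config.setAllowCreate(true);
    // Illustrative value: a larger look-ahead cache increases LN queue
    // hits when records were written in key order.
    config.setConfigParam(
        EnvironmentConfig.CLEANER_LOOK_AHEAD_CACHE_SIZE, "16384");
    Environment env = new Environment(envHome, config);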

Cleaning Statistics: Pending LNs and DBs

When the cleaner is processing a Btree entry (LN or IN), there are two cases where completion of cleaning (and deletion of the file) must be deferred.

  • If an LN that is potentially active (not known-obsolete) is write-locked, the cleaner cannot determine whether it must be migrated until the locking transaction ends, either by aborting or committing.

  • If an LN or IN belongs to a Database that is in the process of being removed or truncated, the LN or IN is considered known-obsolete, but the cleaner must wait until the DB removal/truncation is complete before the file can be deleted.

If one of these conditions occurs, the LN or DB is added to a pending queue. The cleaner will periodically process the entries in the queue and attempt to resolve them as follows.

  • When a pending LN is no longer write-locked, a Btree lookup is performed and the LN is either migrated or considered dead. The LN is removed from the pending queue.

  • When removal/truncation is complete for a pending DB, the DB is simply removed from the pending queue.

When there are no more pending LNs and DBs for a given file, cleaning of the file is considered complete and the file becomes a candidate for deletion after the next checkpoint. If a pending entry causes file deletion to be delayed, because the pending entries cannot be resolved before the next checkpoint, a WARNING level message is logged with more information about the pending entries.

The following stats indicate the size of the pending LN queue, how many LNs in the queue have been processed, and of those processed, how many remain unresolved because the record is still write-locked.

  • {@link #getPendingLNQueueSize}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LN_QUEUE_SIZE_DESC}
  • {@link #getNPendingLNsProcessed}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_PROCESSED_DESC}
  • {@link #getNPendingLNsLocked}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_LOCKED_DESC}
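
A periodic check along these lines may help surface stuck pending LNs early; the reporting mechanism is an application-specific assumption:

    // Sketch: report pending LNs that remain write-locked after being
    // processed, which may point to an unclosed cursor or transaction.
    static void checkPendingLNs(Environment env) {
        EnvironmentStats stats = env.getStats(new StatsConfig());
        if (stats.getNPendingLNsLocked() > 0) {
            System.err.println("Pending LNs still locked: "
                + stats.getNPendingLNsLocked()
                + " (queue size " + stats.getPendingLNQueueSize() + ")");
        }
    }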

If pending LNs remain unresolved, this could mean an application or JE bug has prevented a write-lock from being released. This could happen, for example, if the application fails to end a transaction or close a cursor. For such bugs, closing and re-opening the Environment is usually needed to allow file deletion to proceed. If this occurs for multiple files and is not resolved, it can eventually lead to an out-of-disk situation.

The following stats indicate the size of the pending DB queue, how many DBs in the queue have been processed, and of those processed, how many remain unresolved because the removal/truncation is still incomplete.

  • {@link #getPendingDBQueueSize}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DB_QUEUE_SIZE_DESC}
  • {@link #getNPendingDBsProcessed}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DBS_PROCESSED_DESC}
  • {@link #getNPendingDBsIncomplete}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DBS_INCOMPLETE_DESC}

If pending DBs remain unresolved, this may indicate that the asynchronous portion of DB removal/truncation is taking longer than expected. After a DB removal/truncation transaction is committed, JE asynchronously counts the DB's data as obsolete.

Cleaning Statistics: TTL and expired data


When the {@link WriteOptions#setTTL(int) TTL} feature is used, the obsolete portion of the log includes data that has expired. An expiration histogram is stored for each file and is used to compute the expired size. The current {@link #getCurrentMinUtilization() minimum} and {@link #getCurrentMaxUtilization() maximum} utilization are the lower and upper bounds of computed utilization. They are different only when the TTL feature is used, and some data in the file has expired while other data has become obsolete for other reasons, such as record updates, record deletions or checkpoints. In this case the strictly obsolete size and the expired size may overlap because they are maintained separately.

If the two sizes overlap completely then the minimum utilization is correct, while if there is no overlap then the maximum utilization is correct. Both utilization values trigger cleaning, but when there is significant overlap, the cleaner will perform two-pass cleaning. The following stats indicate the use of two-pass cleaning:

  • {@link #getNCleanerTwoPassRuns}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TWO_PASS_RUNS_DESC}
  • {@link #getNCleanerRevisalRuns}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REVISAL_RUNS_DESC}

In the first pass of two-pass cleaning, the file is read to recompute obsolete and expired sizes, but the file is not cleaned. As a result of recomputing the expired sizes, the strictly obsolete and expired sizes will no longer overlap, and the minimum and maximum utilization will be equal. If the file should still be cleaned, based on the recomputed utilization, it is cleaned as usual, and in this case the number of {@link #getNCleanerTwoPassRuns() two-pass runs} is incremented.

If the recomputed utilization is higher than expected, the file is not cleaned. Instead, its recomputed expiration histogram, which now has size information that does not overlap with the strictly obsolete data, is stored for future use. By storing the revised histogram, the cleaner can select the most appropriate files for cleaning in the future. In this case the number of {@link #getNCleanerRevisalRuns() revisal runs} is incremented, and the number of {@link #getNCleanerRuns() total runs} is not incremented.

Cleaning Statistics: Disk Space Management

The JE cleaner component is also responsible for checking and enforcing the {@link EnvironmentConfig#MAX_DISK} and {@link EnvironmentConfig#FREE_DISK} limits, and for protecting cleaned files from deletion while they are in use by replication, backups, etc. This process is described in the {@link EnvironmentConfig#MAX_DISK} javadoc. The stats related to disk space management are:

  • {@link #getActiveLogSize()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ACTIVE_LOG_SIZE_DESC}
  • {@link #getAvailableLogSize()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_AVAILABLE_LOG_SIZE_DESC}
  • {@link #getReservedLogSize()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RESERVED_LOG_SIZE_DESC}
  • {@link #getProtectedLogSize()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_DESC}
  • {@link #getProtectedLogSizeMap()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_MAP_DESC}
  • {@link #getTotalLogSize()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TOTAL_LOG_SIZE_DESC}
  • {@link #getNCleanerDeletions()}: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DELETIONS_DESC}

The space taken by all data files, {@code totalLogSize}, is divided into categories according to these stats as illustrated below.

    /--------------------------------------------------\
    |                                                  |
    | Active files -- have not been cleaned            |
    |                 and cannot be deleted            |
    |                                                  |
    |             Utilization =                        |
    |    (utilized size) / (total active size)         |
    |                                                  |
    |--------------------------------------------------|
    |                                                  |
    | Reserved files -- have been cleaned and          |
    |                   can be deleted                 |
    |                                                  |
    | /----------------------------------------------\ |
    | |                                              | |
    | | Protected files -- temporarily in use by     | |
    | |                    replication, backups, etc.| |
    | |                                              | |
    | \----------------------------------------------/ |
    |                                                  |
    \--------------------------------------------------/

A key point is that reserved data files will be deleted by JE automatically to prevent violation of a disk limit, as long as the files are not protected. This has two important implications:

  • The {@link #getCurrentMinUtilization() current utilization} stats are calculated based only on the active data files. Reserved files are ignored in this calculation.

  • The {@link #getAvailableLogSize() availableLogSize} stat includes the size of the reserved files that are not protected. These files will be deleted automatically, if this is necessary to allow write operations.

We strongly recommend using {@code availableLogSize} to monitor disk usage and take corrective action well before this value reaches zero. Monitoring the file system free space is not a substitute for this, since the data files include reserved files that will be deleted by JE automatically.

Applications should normally define a threshold for {@code availableLogSize} and raise an alert of some kind when the threshold is reached. When this happens, applications may wish to free space (by deleting records, for example) or expand storage capacity. If JE write operations are needed as part of this procedure, corrective action must be taken while there is still enough space available to perform the write operations.
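
A bare-bones monitoring sketch is shown below; the threshold value and the alerting mechanism are application-specific assumptions:

    // Hypothetical threshold: alert while 10 GB of usable space remains.
    static final long AVAILABLE_THRESHOLD = 10L * 1024 * 1024 * 1024;

    static void checkDiskSpace(Environment env) {
        EnvironmentStats stats = env.getStats(new StatsConfig());
        if (stats.getAvailableLogSize() < AVAILABLE_THRESHOLD) {
            // Replace with the application's alerting of choice.
            System.err.println("JE availableLogSize low: "
                + stats.getAvailableLogSize() + " bytes remaining");
        }
    }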

For example, freeing space by deleting records requires enough space to log the deletions, and enough temporary space for the cleaner to reclaim space for the deleted records. As described in the sections above, the cleaner uses more disk space temporarily in order to migrate LNs, and a checkpoint must be performed before deleting the cleaned files.

How much available space is needed is application specific, and testing may be required to determine the application's {@code availableLogSize} threshold. Note that the default {@link EnvironmentConfig#FREE_DISK} value, five GB, may or may not be large enough to perform the application's recovery procedure. The default {@code FREE_DISK} limit is intended to reserve space for recovery when application monitoring of {@code availableLogSize} fails and emergency measures must be taken.

If {@code availableLogSize} is unexpectedly low, it is possible that protected files are preventing space from being reclaimed. This could be due to replication, backups, etc. See {@link #getReservedLogSize()} and {@link #getProtectedLogSizeMap()} for more information.

It is also possible that data files cannot be deleted due to read-only processes. When one process opens a JE environment in read-write mode and one or more additional processes open the environment in {@link EnvironmentConfig#setReadOnly(boolean) read-only} mode, the read-only processes will prevent the read-write process from deleting data files. For this reason, long running read-only processes are strongly discouraged in a production environment. When data file deletion is prevented for this reason, a SEVERE level message is logged with more information.

I/O Statistics


Group Name: {@value com.sleepycat.je.log.LogStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.log.LogStatDefinition#GROUP_DESC}

I/O Statistics: File Access


JE accesses data files (.jdb files) via Java's standard file system APIs. Because opening a file is relatively expensive, an LRU-based cache of open file handles is maintained. The stats below indicate how many cached file handles are currently open and how many open file operations have taken place.

  • {@link #getNOpenFiles()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_OPEN_FILES_DESC}
  • {@link #getNFileOpens()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_FILE_OPENS_DESC}

To prevent expensive file open operations during record read operations, set {@link EnvironmentConfig#LOG_FILE_CACHE_SIZE} to the maximum number of data files expected in the Environment.
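
For example, if the environment is expected to grow to roughly 2000 data files, the handle cache could be sized to match; the figure is an illustrative assumption:

    EnvironmentConfig config = new EnvironmentConfig();
    // Illustrative sizing: cache handles for up to 2000 .jdb files so
    // record reads do not trigger file open operations.
    config.setConfigParam(EnvironmentConfig.LOG_FILE_CACHE_SIZE, "2000");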

Note that JE may open the same file more than once. If a read operation in one thread is accessing a file via its cached handle and another thread attempts to read from the same file, a temporary handle is opened just for the duration of the read. The {@link #getNFileOpens()} stat includes open operations for both cached file handles and temporary file handles. Therefore, this stat cannot be used to determine whether the file cache is too small.

When a file read is performed, it is always possible for the read buffer size to be smaller than the log entry being read. This is because JE's append-only log contains variable sized entries rather than pages. If the read buffer is too small to contain the entire entry, a repeat read with a larger buffer must be performed. These additional reads can be reduced by monitoring the following two stats and increasing the read buffer size as described below.

When Btree nodes are read at known file locations (by user API operations, for example), the following stat indicates the number of repeat reads:

  • {@link #getNRepeatFaultReads()}: {@value com.sleepycat.je.log.LogStatDefinition#LOGMGR_REPEAT_FAULT_READS_DESC}

When the number of {@link #getNRepeatFaultReads()} is significant, consider increasing {@link EnvironmentConfig#LOG_FAULT_READ_SIZE}.

When data files are read sequentially (by the cleaner, for example) the following stat indicates the number of repeat reads:

  • {@link #getNRepeatIteratorReads()}: {@value com.sleepycat.je.log.LogStatDefinition#LOGMGR_REPEAT_ITERATOR_READS_DESC}

When the number of {@link #getNRepeatIteratorReads()} is significant, consider increasing {@link EnvironmentConfig#LOG_ITERATOR_MAX_SIZE}.
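
Both adjustments are ordinary configuration parameters; the sizes below are illustrative assumptions only:

    // Assumes "config" is the EnvironmentConfig used to open the
    // environment; both values are illustrative.
    config.setConfigParam(EnvironmentConfig.LOG_FAULT_READ_SIZE, "8192");
    config.setConfigParam(
        EnvironmentConfig.LOG_ITERATOR_MAX_SIZE, "16777216");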

The two groups of stats below indicate JE file system reads and writes as number of operations and number of bytes. These stats are roughly divided into random and sequential operations by assuming that storage devices can optimize for sequential access if two consecutive operations are performed one MB or less apart in the same file. This categorization is approximate and may differ from the actual number depending on the type of disks and file system, disk geometry, and file system cache size.

The JE file read and write stats can sometimes be useful for debugging or for getting a rough idea of I/O characteristics. However, monitoring of system level I/O stats (e.g., using {@code iostat}) gives a more accurate picture of actual I/O, since access via the buffer cache is not included. In addition, the JE stats are not broken out by operation type and therefore don't add a lot of useful information to the system level I/O stats, other than the rough division of random and sequential I/O.

The JE file read stats are:

  • {@link #getNRandomReads()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_READS_DESC}
  • {@link #getNRandomReadBytes()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_READ_BYTES_DESC}
  • {@link #getNSequentialReads()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_READS_DESC}
  • {@link #getNSequentialReadBytes()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_READ_BYTES_DESC}

JE file read stats include file access resulting from the following operations. Because internal operations are included, it is not practical to correlate these stats directly to user operations.

  • User read operations via the JE APIs, e.g., {@link Database#get(Transaction, DatabaseEntry, DatabaseEntry, Get, ReadOptions) Database.get} and {@link Cursor#get(DatabaseEntry, DatabaseEntry, Get, ReadOptions) Cursor.get}.

  • Utility operations that access records such as {@link EnvironmentConfig#VERIFY_BTREE Btree verification}.

  • In a replicated environment, reads are performed by replication and mastership changes (syncup). In all environments, reads are performed by recovery (Environment open) and log cleaning. Log cleaning is typically a significant contributor to read I/O.

  • Note that the reads above can cause more read I/O than expected when {@link #getNRepeatFaultReads()} or {@link #getNRepeatIteratorReads()} are consistently non-zero.

  • Note that while {@link EnvironmentConfig#VERIFY_LOG log verification} does perform read I/O, this I/O is not included in the JE file read stats. The same is true for a {@link com.sleepycat.je.rep.NetworkRestore} in a replicated environment: the read I/O on the source node is not counted in the JE file read stats.

The JE file write stats are:

  • {@link #getNRandomWrites()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_WRITES_DESC}
  • {@link #getNRandomWriteBytes()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_WRITE_BYTES_DESC}
  • {@link #getNSequentialWrites()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_WRITES_DESC}
  • {@link #getNSequentialWriteBytes()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_WRITE_BYTES_DESC}

JE file write stats include file access resulting from the following operations. As with the read stats, because internal operations are included it is not practical to correlate the write stats directly to user operations.

  • User write operations via the JE APIs, e.g., {@link Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put, WriteOptions) Database.put} and {@link Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions) Cursor.put}.

  • A small number of internal record write operations are performed to maintain JE internal data structures, e.g., cleaner metadata.

  • Writes are performed by checkpointing and eviction. Checkpoints are typically a significant contributor to write I/O.

  • Note that while {@link com.sleepycat.je.rep.NetworkRestore} does perform write I/O, this I/O is not included in the JE file write stats.

I/O Statistics: Logging Critical Section


JE uses an append-only storage system where each log entry is assigned an LSN (log sequence number). The LSN is a 64-bit integer consisting of two 32-bit parts: the file number is the high order 32 bits and the file offset is the low order 32 bits.
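
Concretely, a raw LSN value can be decomposed with plain bit arithmetic; this sketch assumes the LSN is available as a {@code long}:

    // Split a 64-bit LSN into its two 32-bit parts.
    static long fileNumber(long lsn) {
        return lsn >>> 32;          // high order 32 bits
    }

    static long fileOffset(long lsn) {
        return lsn & 0xFFFFFFFFL;   // low order 32 bits
    }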

LSNs are used in the Btree to reference child nodes from their parent node. Therefore a node's LSN is assigned when the node is written, including the case where the write is buffered. The next LSN to be assigned is indicated by the following stat:

  • {@link #getEndOfLog()}: {@value com.sleepycat.je.log.LogStatDefinition#LOGMGR_END_OF_LOG_DESC}

LSN assignment and assignment of log buffer space must be performed serially, and therefore these operations occur in a logging critical section. In general JE strives to do as little additional work as possible in the logging critical section. However, in certain cases additional operations are performed in the critical section and these generally impact performance negatively. These special cases will be noted in the sections that follow.

I/O Statistics: Log Buffers


A set of JE log buffers is used to buffer writes. When write operations use {@link SyncPolicy#NO_SYNC}, a file write is not performed until a log buffer is filled. This positively impacts performance by reducing the number of file writes. Note that checkpoint writes use {@code NO_SYNC}, so this benefits performance even when user operations do not use {@code NO_SYNC}.

(When {@link SyncPolicy#SYNC} or {@link SyncPolicy#WRITE_NO_SYNC} is used, the required file write and fsync are performed using a group commit mechanism, which is described further below.)

The size and number of log buffers is configured using {@link EnvironmentConfig#LOG_BUFFER_SIZE}, {@link EnvironmentConfig#LOG_NUM_BUFFERS} and {@link EnvironmentConfig#LOG_TOTAL_BUFFER_BYTES}. The resulting total size and number of buffers is indicated by the following stats:

  • {@link #getBufferBytes()}: {@value com.sleepycat.je.log.LogStatDefinition#LBFP_BUFFER_BYTES_DESC}
  • {@link #getNLogBuffers()}: {@value com.sleepycat.je.log.LogStatDefinition#LBFP_LOG_BUFFERS_DESC}
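
As a sketch, the buffer pool could be configured at Environment creation and the resulting totals confirmed from the stats; the sizes and {@code envHome} are illustrative assumptions:

    EnvironmentConfig config = new EnvironmentConfig();
    config.setAllowCreate(true);
    // Illustrative sizing: sixteen 1 MB log buffers.
    config.setConfigParam(EnvironmentConfig.LOG_NUM_BUFFERS, "16");
    config.setConfigParam(EnvironmentConfig.LOG_BUFFER_SIZE, "1048576");
    Environment env = new Environment(envHome, config);

    // Confirm the sizes the environment actually chose.
    EnvironmentStats stats = env.getStats(new StatsConfig());
    System.out.println("log buffers: " + stats.getNLogBuffers()
        + ", total buffer bytes: " + stats.getBufferBytes());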

The default buffer size (one MB) is expected to be optimal for most applications, and in NoSQL DB the default buffer size is used. However, if an individual entry (e.g., a BIN or LN) is larger than the buffer size, the log buffer mechanism is bypassed and this can negatively impact performance. When writing such an entry, the write occurs in the critical section using a temporary buffer, and any dirty log buffers are also written in the critical section. When this occurs it is indicated by the following stat:

  • {@link #getNTempBufferWrites()}: {@value com.sleepycat.je.log.LogStatDefinition#LOGMGR_TEMP_BUFFER_WRITES_DESC}

When {@link #getNTempBufferWrites()} is consistently non-zero, consider increasing the log buffer size.

The number of buffers also impacts write performance when many threads are performing write operations. The use of multiple buffers allows one writing thread to flush the completed dirty buffers while other writing threads add entries to "clean" buffers (that have already been written).

If many threads are adding to clean buffers while the completed dirty buffers are being written, it is possible that no more clean buffers will be available for adding entries. When this happens, the dirty buffers are flushed in the critical section, which can negatively impact performance. This is indicated by the following stat:

  • {@link #getNNoFreeBuffer()}: {@value com.sleepycat.je.log.LogStatDefinition#LBFP_NO_FREE_BUFFER_DESC}

When {@link #getNNoFreeBuffer()} is consistently non-zero, consider increasing the number of log buffers.
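
The two buffer-related symptoms above can be watched together; the reporting below is an application-specific assumption:

    // Assumes an open Environment "env".
    EnvironmentStats stats = env.getStats(new StatsConfig());
    if (stats.getNTempBufferWrites() > 0) {
        // Entries larger than a log buffer bypassed the pool;
        // consider a larger LOG_BUFFER_SIZE.
        System.err.println(
            "temp buffer writes: " + stats.getNTempBufferWrites());
    }
    if (stats.getNNoFreeBuffer() > 0) {
        // Writers ran out of clean buffers; consider a larger
        // LOG_NUM_BUFFERS.
        System.err.println(
            "no-free-buffer events: " + stats.getNNoFreeBuffer());
    }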

The number of log buffers also impacts read performance. JE read operations use the log buffers to read entries that were recently written. This occurs infrequently in the case of user read operations via the JE APIs, since recently written data is infrequently read and is often resident in the cache. However, it does occur frequently and is an important factor in the following cases:

  • A transaction abort reads the entries written by the transaction in order to undo them. These entries should normally be available in the log buffers. Avoiding file reads reduces the latency of aborted transactions.

  • In a replicated environment, the master node must read recently written entries needed by replicas or secondary nodes. By reading these entries from the log buffers, the likelihood of the need for file reads is reduced, and this can prevent lagging replicas from falling further behind.

Because of the last point above involving replication, in NoSQL DB the number of log buffers is set to 16. In general we recommend configuring 16 buffers or more for a replicated environment.

The following stats indicate the number of requests to read log entries by LSN, and the number that were not found in the log buffers.

  • {@link #getNNotResident()}: {@value com.sleepycat.je.log.LogStatDefinition#LBFP_NOT_RESIDENT_DESC}
  • {@link #getNCacheMiss()}: {@value com.sleepycat.je.log.LogStatDefinition#LBFP_MISS_DESC}

In general these two stats are used only for internal JE debugging and are not useful to the application. This is because {@link #getNNotResident()} is roughly the sum of the {@code VLSNIndex nMisses} replication stat and the cache fetch miss stats: {@link #getNLNsFetchMiss()}, {@link #getNBINsFetchMiss()}, {@link #getNFullBINsMiss} and {@link #getNUpperINsFetchMiss()}.

I/O Statistics: The Write Queue


JE performs special locking to prevent an fsync and a file write from executing concurrently.

The write queue is a single, low-level buffer that reduces blocking due to a concurrent fsync and file write request. When a write of a dirty log buffer is needed to free a log buffer for a {@link SyncPolicy#NO_SYNC} operation (i.e., durability is not required), the write queue is used to hold the data temporarily and allow a log buffer to be freed.

Use of the write queue is strongly recommended since there is no known drawback to using it. It is enabled by default and in NoSQL DB. However, it can be disabled if desired by setting {@link EnvironmentConfig#LOG_USE_WRITE_QUEUE} to {@code false}.
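
Disabling it is a single parameter change, shown here only to illustrate the setting (the default of {@code true} is recommended):

    EnvironmentConfig config = new EnvironmentConfig();
    // Not recommended in practice; shown only for completeness.
    config.setConfigParam(EnvironmentConfig.LOG_USE_WRITE_QUEUE, "false");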

The following stats indicate use of the write queue for satisfying file write and read requests. Note that when the write queue is enabled, all file read requests must check the write queue to avoid returning stale data.

  • {@link #getNWritesFromWriteQueue()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITES_FROM_WRITEQUEUE_DESC}
  • {@link #getNBytesWrittenFromWriteQueue()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_DESC}
  • {@link #getNReadsFromWriteQueue()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_READS_FROM_WRITEQUEUE_DESC}
  • {@link #getNBytesReadFromWriteQueue()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_BYTES_READ_FROM_WRITEQUEUE_DESC}

The default size of the write queue (one MB) is expected to be adequate for most applications. Note that the write queue size should never be smaller than the log buffer size (which is also one MB by default). In NoSQL DB, the default sizes for the write queue and the log buffer are used.

However, when many {@code NO_SYNC} writes are requested during an fsync, some write requests may have to block until the fsync is complete. This is indicated by the following stats:

  • {@link #getNWriteQueueOverflow()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITEQUEUE_OVERFLOW_DESC}
  • {@link #getNWriteQueueOverflowFailures()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_DESC}

When a {@code NO_SYNC} write request occurs during an fsync and the size of the write request's data is larger than the free space in the write queue, the {@link #getNWriteQueueOverflow()} stat is incremented. When this stat is consistently non-zero, consider the following possible reasons and remedies:

  • The log buffer size may be larger than the write queue size. Ensure that the {@link EnvironmentConfig#LOG_WRITE_QUEUE_SIZE} is at least as large as the {@link EnvironmentConfig#LOG_BUFFER_SIZE} (see the sketch after this list).

  • An individual log entry (e.g., a BIN or LN) may be larger than the log buffer size, as indicated by the {@link #getNTempBufferWrites()} stat. Consider increasing {@code LOG_WRITE_QUEUE_SIZE} and/or {@code LOG_BUFFER_SIZE} such that {@link #getNTempBufferWrites()} is consistently zero.

  • When multiple threads perform {@code NO_SYNC} write requests during a single fsync, the write queue may not be large enough to prevent overflows. After the two causes above have been ruled out, consider increasing the {@code LOG_WRITE_QUEUE_SIZE}.
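
A configuration sketch for the first remedy follows; the 2 MB figures are illustrative assumptions only:

    EnvironmentConfig config = new EnvironmentConfig();
    // Keep the write queue at least as large as the log buffer
    // (both set to 2 MB here for illustration).
    config.setConfigParam(EnvironmentConfig.LOG_BUFFER_SIZE, "2097152");
    config.setConfigParam(
        EnvironmentConfig.LOG_WRITE_QUEUE_SIZE, "2097152");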

When such a write queue overflow occurs, JE will wait for the fsync to complete, empty the write queue by writing it to the file, and attempt again to add the data to the write queue. If this fails again because there is still not enough free space in the write queue, then the {@link #getNWriteQueueOverflowFailures()} stat is incremented. In this case the data is written to the file rather than added to the write queue, even though this may require waiting for an fsync to complete.

If {@link #getNWriteQueueOverflowFailures()} is consistently non-zero, the possible causes are the same as those listed above, and the remedies described above should be applied.

I/O Statistics: Fsync and Group Commit


When {@link SyncPolicy#SYNC} or {@link SyncPolicy#WRITE_NO_SYNC} is used for transactional write operations, the required file write and fsync are performed using a group commit mechanism. In the presence of concurrent transactions, this mechanism often allows performing a single write and fsync for multiple transactions, while still ensuring that the write and fsync are performed before the transaction {@code commit()} method (or the {@code put()} or {@code delete()} operation method in the case of auto-commit) returns successfully.

First note that not all file write and fsync operations are due to user transaction commits, and not all fsyncs use the group commit mechanism.

  • In several cases a transaction is committed for an internal database, or a special log entry is written for an internal operation, with {@code SYNC} or {@code WRITE_NO_SYNC} durability. In these cases the group commit mechanism is used.

  • When {@link Environment#flushLog} is called by the application, a file write is performed and, if the {@code fsync} parameter is true, an fsync is also performed. A file write and fsync are also performed after an interval of no write activity, as determined by {@link EnvironmentConfig#LOG_FLUSH_NO_SYNC_INTERVAL} and {@link EnvironmentConfig#LOG_FLUSH_SYNC_INTERVAL}. In these cases the group commit mechanism is not used.

  • In several cases an fsync is performed internally that does not use the group commit mechanism. These cases include: completion of the last data file when a new file is created, flushing of metadata before deleting a cleaned file, and Environment open and close.

The following stats describe all fsyncs performed by JE, whether or not the group commit mechanism is used.

  • {@link #getNLogFSyncs()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_LOG_FSYNCS_DESC}
  • {@link #getFSyncAvgMs()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_AVG_MS_DESC}
  • {@link #getFSync95Ms()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_95_MS_DESC}
  • {@link #getFSync99Ms()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_99_MS_DESC}
  • {@link #getFSyncMaxMs()}: {@value com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_MAX_MS_DESC}

Long fsync times often result in long transaction latencies. When this is indicated by the above stats, ensure that the Linux page cache has been tuned to permit the OS to write asynchronously to disk whenever possible. For the NoSQL DB product this is described under Linux Page Cache Tuning. To aid in diagnosing long fsyncs, a WARNING level message is logged when the maximum fsync time exceeds {@link EnvironmentConfig#LOG_FSYNC_TIME_LIMIT}.
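
For example, the latency distribution could be polled and logged periodically; the output format is an assumption:

    // Assumes an open Environment "env".
    EnvironmentStats stats = env.getStats(new StatsConfig());
    System.out.printf(
        "fsyncs=%d avg=%dms p95=%dms p99=%dms max=%dms%n",
        stats.getNLogFSyncs(), stats.getFSyncAvgMs(),
        stats.getFSync95Ms(), stats.getFSync99Ms(),
        stats.getFSyncMaxMs());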

The following stats indicate when group commit is requested for a write operation. Group commit requests include all user transactions with {@code SYNC} or {@code WRITE_NO_SYNC} durability, as well as the internal JE write operations that use group commit.

  • {@link #getNGroupCommitRequests()}: {@value com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_N_GROUP_COMMIT_REQUESTS_DESC}
  • {@link #getNFSyncRequests()}: {@value com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_FSYNC_REQUESTS_DESC}

All group commit requests result in a group commit operation that flushes all dirty log buffers and the write queue using a file write. In addition, requests using {@code SYNC} durability will cause the group commit operation to include an fsync.

Because group commit operations are performed serially, while a group commit is executing in one thread, one or more other threads may be waiting to perform a group commit. The group commit mechanism works by forming a group containing the waiting threads. When the prior group commit is finished, a single group commit is performed on behalf of the new group in one of this group's threads, which is called the leader. The other threads in the group are called waiters, and they proceed only after the leader has finished the group commit.

If a waiter thread waits longer than {@link EnvironmentConfig#LOG_FSYNC_TIMEOUT} for the leader to finish the group commit operation, the waiter will remove itself from the group and perform a group commit operation independently. The number of such timeouts is indicated by the following stat:

  • {@link #getNFSyncTimeouts()}: {@value com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_TIMEOUTS_DESC}

The timeout is intended to prevent waiter threads from waiting indefinitely due to an unexpected problem. If {@link #getNFSyncTimeouts()} is consistently non-zero and the application is performing normally in other respects, consider increasing {@link EnvironmentConfig#LOG_FSYNC_TIMEOUT}.
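
Raising the timeout is a one-line configuration change; the value and the duration syntax shown are illustrative assumptions:

    EnvironmentConfig config = new EnvironmentConfig();
    // Illustrative value: let waiters wait up to 2 seconds for the
    // group commit leader before timing out.
    config.setConfigParam(EnvironmentConfig.LOG_FSYNC_TIMEOUT, "2 s");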

The following stat indicates the number of group commit operations that included an fsync. There is currently no stat available indicating the number of group commit operations that did not include an fsync.

  • {@link #getNFSyncs()}: {@value com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_FSYNCS_DESC}

Note that {@link #getNFSyncs()} is a subset of the {@link #getNLogFSyncs()} total that is described further above.

Node Compression Statistics


Group Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_DESC}

The following statistics are available. More information will be provided in a future release.

  • {@link #getSplitBins()}: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_SPLIT_BINS_DESC}
  • {@link #getDbClosedBins()}: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_DBCLOSED_BINS_DESC}
  • {@link #getCursorsBins()}: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_CURSORS_BINS_DESC}
  • {@link #getNonEmptyBins()}: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_NON_EMPTY_BINS_DESC}
  • {@link #getProcessedBins()}: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_PROCESSED_BINS_DESC}
  • {@link #getInCompQueueSize()}: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_QUEUE_SIZE_DESC}

Checkpoint Statistics


Group Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_DESC}

The following statistics are available. More information will be provided in a future release.

  • {@link #getNCheckpoints()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_CHECKPOINTS_DESC}
  • {@link #getLastCheckpointInterval()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_INTERVAL_DESC}
  • {@link #getNFullINFlush()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_FULL_IN_FLUSH_DESC}
  • {@link #getNFullBINFlush()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_FULL_BIN_FLUSH_DESC}
  • {@link #getNDeltaINFlush()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_DELTA_IN_FLUSH_DESC}
  • {@link #getLastCheckpointId()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPTID_DESC}
  • {@link #getLastCheckpointStart()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_START_DESC}
  • {@link #getLastCheckpointEnd()}: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_END_DESC}

Lock Statistics


Group Name: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
Description: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_DESC}

The following statistics are available. More information will be provided in a future release.

  • {@link #getNReadLocks()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_READ_LOCKS_DESC}
  • {@link #getNWriteLocks()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WRITE_LOCKS_DESC}
  • {@link #getNOwners()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_OWNERS_DESC}
  • {@link #getNRequests()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_REQUESTS_DESC}
  • {@link #getNTotalLocks()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_TOTAL_DESC}
  • {@link #getNWaits()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WAITS_DESC}
  • {@link #getNWaiters()}: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WAITERS_DESC}

Operation Throughput Statistics


Group Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
Description: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_DESC}

The following statistics are available. More information will be provided in a future release.

  • {@link #getPriSearchOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_SEARCH_DESC}
  • {@link #getPriSearchFailOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_SEARCH_FAIL_DESC}
  • {@link #getSecSearchOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_SEARCH_DESC}
  • {@link #getSecSearchFailOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_SEARCH_FAIL_DESC}
  • {@link #getPriPositionOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_POSITION_DESC}
  • {@link #getSecPositionOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_POSITION_DESC}
  • {@link #getPriInsertOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_INSERT_DESC}
  • {@link #getPriInsertFailOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_INSERT_FAIL_DESC}
  • {@link #getSecInsertOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_INSERT_DESC}
  • {@link #getPriUpdateOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_UPDATE_DESC}
  • {@link #getSecUpdateOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_UPDATE_DESC}
  • {@link #getPriDeleteOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_DELETE_DESC}
  • {@link #getPriDeleteFailOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_DELETE_FAIL_DESC}
  • {@link #getSecDeleteOps()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_DELETE_DESC}

Btree Operation Statistics


Group Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
Description: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_DESC}

The following statistics are available. More information will be provided in a future release.

  • {@link #getRelatchesRequired()}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_RELATCHES_REQUIRED_DESC}
  • {@link #getRootSplits()}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_ROOT_SPLITS_DESC}
  • {@link #getNBinDeltaGetOps()}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_GETS_DESC}
  • {@link #getNBinDeltaInsertOps()}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_INSERTS_DESC}
  • {@link #getNBinDeltaUpdateOps()}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_UPDATES_DESC}
  • {@link #getNBinDeltaDeleteOps()}: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_DELETES_DESC}

Miscellaneous Environment-Wide Statistics


Group Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_NAME}
Description: {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_DESC}

The following statistics are available. More information will be provided in a future release.

  • {@link #getEnvironmentCreationTime()}: {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_CREATION_TIME_DESC}

@see Viewing Statistics with JConsole

public class EnvironmentStats implements Serializable {

    /* find/replace:
    public static final StatDefinition\s+(\w+)\s*=\s*new StatDefinition\(\s*(".*"),\s*(".*")(,?\s*\w*\.?\w*)\);
    public static final String $1_NAME =\n $2;\n public static final String $1_DESC =\n $3;\n public static final StatDefinition $1 =\n new StatDefinition(\n $1_NAME,\n $1_DESC$4);
    */

    private static final long serialVersionUID = 1734048134L;

    private StatGroup incompStats;
    private StatGroup cacheStats;
    private StatGroup offHeapStats;
    private StatGroup ckptStats;
    private StatGroup cleanerStats;
    private StatGroup logStats;
    private StatGroup lockStats;
    private StatGroup envImplStats;
    private StatGroup backupStats;
    private StatGroup btreeOpStats;
    private StatGroup throughputStats;
    private StatGroup taskCoordinatorStats;
    private StatGroup eraserStats;

    /**
     * @hidden
     * Internal use only.
     */
    public EnvironmentStats() {
        incompStats = new StatGroup(INCompStatDefinition.GROUP_NAME,
                                    INCompStatDefinition.GROUP_DESC);
        cacheStats = new StatGroup(EvictorStatDefinition.GROUP_NAME,
                                   EvictorStatDefinition.GROUP_DESC);
        offHeapStats = new StatGroup(OffHeapStatDefinition.GROUP_NAME,
                                     OffHeapStatDefinition.GROUP_DESC);
        ckptStats = new StatGroup(CheckpointStatDefinition.GROUP_NAME,
                                  CheckpointStatDefinition.GROUP_DESC);
        cleanerStats = new StatGroup(CleanerStatDefinition.GROUP_NAME,
                                     CleanerStatDefinition.GROUP_DESC);
        logStats = new StatGroup(LogStatDefinition.GROUP_NAME,
                                 LogStatDefinition.GROUP_DESC);
        lockStats = new StatGroup(LockStatDefinition.GROUP_NAME,
                                  LockStatDefinition.GROUP_DESC);
        envImplStats = new StatGroup(DbiStatDefinition.ENV_GROUP_NAME,
                                     DbiStatDefinition.ENV_GROUP_DESC);
        backupStats = new StatGroup(DbiStatDefinition.BACKUP_GROUP_NAME,
                                    DbiStatDefinition.BACKUP_GROUP_DESC);
        btreeOpStats = new StatGroup(BTreeStatDefinition.BT_OP_GROUP_NAME,
                                     BTreeStatDefinition.BT_OP_GROUP_DESC);
        throughputStats =
            new StatGroup(DbiStatDefinition.THROUGHPUT_GROUP_NAME,
                          DbiStatDefinition.THROUGHPUT_GROUP_DESC);
        taskCoordinatorStats =
            new StatGroup(TaskCoordinator.StatDefs.GROUP_NAME,
                          TaskCoordinator.StatDefs.GROUP_DESC);
        eraserStats = new StatGroup(EraserStatDefinition.GROUP_NAME,
                                    EraserStatDefinition.GROUP_DESC);
    }

    /**
     * @hidden
     * Internal use only.
     */
    public List<StatGroup> getStatGroups() {

        /*
         * If this deserialized object was serialized prior to the addition
         * of certain stat groups, these fields will be null. Initialize
         * them now.
         */
        if (eraserStats == null) {
            eraserStats = new StatGroup(
                EraserStatDefinition.GROUP_NAME,
                EraserStatDefinition.GROUP_DESC);
        }
        if (taskCoordinatorStats == null) {
            taskCoordinatorStats = new StatGroup(
                TaskCoordinator.StatDefs.GROUP_NAME,
                TaskCoordinator.StatDefs.GROUP_DESC);
        }
        if (btreeOpStats == null) {
            btreeOpStats = new StatGroup(
                BTreeStatDefinition.BT_OP_GROUP_NAME,
                BTreeStatDefinition.BT_OP_GROUP_DESC);
        }
        if (backupStats == null) {
            backupStats = new StatGroup(
                DbiStatDefinition.BACKUP_GROUP_NAME,
                DbiStatDefinition.BACKUP_GROUP_DESC);
        }

        return Arrays.asList(
            logStats, cacheStats, offHeapStats, cleanerStats, incompStats,
            ckptStats, envImplStats, backupStats, btreeOpStats, lockStats,
            throughputStats, taskCoordinatorStats, eraserStats);
    }

    /**
     * @hidden
     * Internal use only.
     */
    public Map<String, StatGroup> getStatGroupsMap() {
        final HashMap<String, StatGroup> map = new HashMap<>();
        for (StatGroup group : getStatGroups()) {
            map.put(group.getName(), group);
        }
        return map;
    }

    /**
     * @hidden
     * Internal use only.
     */
    public void setStatGroup(StatGroup sg) {
        switch (sg.getName()) {
        case INCompStatDefinition.GROUP_NAME:
            incompStats = sg;
            break;
        case EvictorStatDefinition.GROUP_NAME:
            cacheStats = sg;
            break;
        case OffHeapStatDefinition.GROUP_NAME:
            offHeapStats = sg;
            break;
        case CheckpointStatDefinition.GROUP_NAME:
            ckptStats = sg;
            break;
        case CleanerStatDefinition.GROUP_NAME:
            cleanerStats = sg;
            break;
        case LogStatDefinition.GROUP_NAME:
            logStats = sg;
            break;
        case LockStatDefinition.GROUP_NAME:
            lockStats = sg;
            break;
        case BTreeStatDefinition.BT_OP_GROUP_NAME:
            btreeOpStats = sg;
            break;
        case DbiStatDefinition.ENV_GROUP_NAME:
            envImplStats = sg;
            break;
        case DbiStatDefinition.BACKUP_GROUP_NAME:
            backupStats = sg;
            break;
        case DbiStatDefinition.THROUGHPUT_GROUP_NAME:
            throughputStats = sg;
            break;
        case TaskCoordinator.StatDefs.GROUP_NAME:
            taskCoordinatorStats = sg;
            break;
        case EraserStatDefinition.GROUP_NAME:
            eraserStats = sg;
            break;
        default:
            throw EnvironmentFailureException.unexpectedState(
                "Invalid stat group name in setStatGroup " + sg.getName());
        }
    }

    /**
     * @hidden
     * Internal use only.
     * For JConsole plugin support.
     */
    public static String[] getStatGroupTitles() {
        List<StatGroup> groups = new EnvironmentStats().getStatGroups();
        final String[] titles = new String[groups.size()];
        for (int i = 0; i < titles.length; i += 1) {
            titles[i] = groups.get(i).getName();
        }
        return titles;
    }

    /** @hidden Internal use only. */
    public void setThroughputStats(StatGroup stats) {
        throughputStats = stats;
    }

    /** @hidden Internal use only. */
    public void setINCompStats(StatGroup stats) {
        incompStats = stats;
    }

    /** @hidden Internal use only. */
    public void setTaskCoordinatorStats(StatGroup stats) {
        taskCoordinatorStats = stats;
    }

    /** @hidden Internal use only. */
    public void setEraserStats(StatGroup stats) {
        eraserStats = stats;
    }

    /** @hidden Internal use only. */
    public void setCkptStats(StatGroup stats) {
        ckptStats = stats;
    }

    /** @hidden Internal use only. */
    public void setCleanerStats(StatGroup stats) {
        cleanerStats = stats;
    }

    /** @hidden Internal use only. */
    public void setLogStats(StatGroup stats) {
        logStats = stats;
    }

    /** @hidden Internal use only. */
    public void setMBAndEvictorStats(StatGroup clonedMBStats,
                                     StatGroup clonedEvictorStats) {
        cacheStats = clonedEvictorStats;
        cacheStats.addAll(clonedMBStats);
    }

    /** @hidden Internal use only. */
    public void setOffHeapStats(StatGroup stats) {
        offHeapStats = stats;
    }

    /** @hidden Internal use only. */
    public void setLockStats(StatGroup stats) {
        lockStats = stats;
    }

    /** @hidden Internal use only. */
    public void setEnvStats(StatGroup stats) {
        envImplStats = stats;
    }

    /** @hidden Internal use only. */
    public void setBackupStats(StatGroup stats) {
        backupStats = stats;
    }

    /** @hidden Internal use only. */
    public void setBtreeOpStats(StatGroup stats) {
        btreeOpStats = stats;
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_CREATION_TIME_DESC}
     *
     * Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_NAME}
     * Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_CREATION_TIME_NAME}
     *
     * @see Miscellaneous Environment-Wide Statistics
     */
    public long getEnvironmentCreationTime() {
        return envImplStats.getLong(ENV_CREATION_TIME);
    }

    /* INCompressor stats. */

    /**
     * {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_CURSORS_BINS_DESC}
     *
     * Group: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_CURSORS_BINS_NAME}
     *
     * @see Node Compression Statistics
     */
    public long getCursorsBins() {
        return incompStats.getLong(INCOMP_CURSORS_BINS);
    }

    /**
     * {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_DBCLOSED_BINS_DESC}
     *
     * Group: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_DBCLOSED_BINS_NAME}
     *
     * @see Node Compression Statistics
     */
    public long getDbClosedBins() {
        return incompStats.getLong(INCOMP_DBCLOSED_BINS);
    }

    /**
     * {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_QUEUE_SIZE_DESC}
     *
     * Group: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_QUEUE_SIZE_NAME}
     *
     * @see Node Compression Statistics
     */
    public long getInCompQueueSize() {
        return incompStats.getLong(INCOMP_QUEUE_SIZE);
    }

    /**
     * {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_NON_EMPTY_BINS_DESC}
     *
     * Group: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_NON_EMPTY_BINS_NAME}
     *
     * @see Node Compression Statistics
     */
    public long getNonEmptyBins() {
        return incompStats.getLong(INCOMP_NON_EMPTY_BINS);
    }

    /**
     * {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_PROCESSED_BINS_DESC}
     *
     * Group: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_PROCESSED_BINS_NAME}
     *
     * @see Node Compression Statistics
     */
    public long getProcessedBins() {
        return incompStats.getLong(INCOMP_PROCESSED_BINS);
    }

    /**
     * {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_SPLIT_BINS_DESC}
     *
     * Group: {@value com.sleepycat.je.incomp.INCompStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.incomp.INCompStatDefinition#INCOMP_SPLIT_BINS_NAME}
     *
     * @see Node Compression Statistics
     */
    public long getSplitBins() {
        return incompStats.getLong(INCOMP_SPLIT_BINS);
    }

    /* Checkpointer stats. */

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPTID_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPTID_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getLastCheckpointId() {
        return ckptStats.getLong(CKPT_LAST_CKPTID);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_CHECKPOINTS_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_CHECKPOINTS_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getNCheckpoints() {
        return ckptStats.getLong(CKPT_CHECKPOINTS);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_FULL_IN_FLUSH_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_FULL_IN_FLUSH_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getNFullINFlush() {
        return ckptStats.getLong(CKPT_FULL_IN_FLUSH);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_FULL_BIN_FLUSH_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_FULL_BIN_FLUSH_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getNFullBINFlush() {
        return ckptStats.getLong(CKPT_FULL_BIN_FLUSH);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_DELTA_IN_FLUSH_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_DELTA_IN_FLUSH_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getNDeltaINFlush() {
        return ckptStats.getLong(CKPT_DELTA_IN_FLUSH);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_INTERVAL_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_INTERVAL_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getLastCheckpointInterval() {
        return ckptStats.getLong(CKPT_LAST_CKPT_INTERVAL);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_START_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_START_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getLastCheckpointStart() {
        return ckptStats.getLong(CKPT_LAST_CKPT_START);
    }

    /**
     * {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_END_DESC}
     *
     * Group: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.recovery.CheckpointStatDefinition#CKPT_LAST_CKPT_END_NAME}
     *
     * @see Checkpoint Statistics
     */
    public long getLastCheckpointEnd() {
        return ckptStats.getLong(CKPT_LAST_CKPT_END);
    }

    /* Cleaner stats. */

    /**
     * @deprecated in 7.0, always returns zero. Use {@link
     * #getCurrentMinUtilization()} and {@link #getCurrentMaxUtilization()}
     * to monitor cleaner behavior.
     */
    public int getCleanerBacklog() {
        return 0;
    }

    /**
     * @deprecated in 7.5, always returns zero. Use {@link
     * #getProtectedLogSize()} and {@link #getProtectedLogSizeMap()} to
     * monitor file protection.
     */
    public int getFileDeletionBacklog() {
        return 0;
    }

    /**
     * {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MIN_UTILIZATION_DESC}
     *
     * Group: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MIN_UTILIZATION_NAME}
     *
     * @see Cleaner Statistics: Utilization
     * @since 6.5
     */
    public int getCurrentMinUtilization() {
        return cleanerStats.getInt(CLEANER_MIN_UTILIZATION);
    }

    /**
     * {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MAX_UTILIZATION_DESC}
     *
     * Group: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_MAX_UTILIZATION_NAME}
     *
     * @see Cleaner Statistics: Utilization
     * @since 6.5
     */
    public int getCurrentMaxUtilization() {
        return cleanerStats.getInt(CLEANER_MAX_UTILIZATION);
    }

    /**
     * {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PREDICTED_MIN_UTILIZATION_DESC}
     *
     * Group: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PREDICTED_MIN_UTILIZATION_NAME}
     *
     * @see Cleaner Statistics: Utilization
     * @since 18.1
     */
    public int getPredictedMinUtilization() {
        return cleanerStats.getInt(CLEANER_PREDICTED_MIN_UTILIZATION);
    }

    /**
     * {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PREDICTED_MAX_UTILIZATION_DESC}
     *
     * Group: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME}
     * Name: {@value com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PREDICTED_MAX_UTILIZATION_NAME}
     *
     * @see Cleaner Statistics: Utilization
     * @since 18.1
     */
    public int getPredictedMaxUtilization() {
        return cleanerStats.getInt(CLEANER_PREDICTED_MAX_UTILIZATION);
    }

    /**
     * @deprecated in JE 6.5, use {@link #getCurrentMinUtilization()} or
     * {@link #getCurrentMaxUtilization()} instead.
     */
    public int getLastKnownUtilization() {
        return getCurrentMinUtilization();
    }

    /**
     * @deprecated in JE 6.3. Adjustments are no longer needed because LN
     * log sizes have been stored in the Btree since JE 6.0.
     */
    public float getLNSizeCorrectionFactor() {
        return 1;
    }

    /**
     * @deprecated in JE 5.0.56. Adjustments are no longer needed because
     * LN log sizes have been stored in the Btree since JE 6.0.
     */
    public float getCorrectedAvgLNSize() {
        return Float.NaN;
    }

    /**
     * @deprecated in JE 5.0.56. Adjustments are no longer needed because
     * LN log sizes have been stored in the Btree since JE 6.0.
     */
    public float getEstimatedAvgLNSize() {
        return Float.NaN;
    }

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RUNS_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RUNS_NAME}

     * <p>This includes {@link #getNCleanerTwoPassRuns() two-pass runs} but
     * not {@link #getNCleanerRevisalRuns() revisal runs}.</p>
     *
     * @see Cleaner Statistics
     */
    public long getNCleanerRuns() {
        return cleanerStats.getLong(CLEANER_RUNS);
    }
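    /*
     * Illustrative sketch (not in the original source): separating the run
     * counters, given the inclusion rule documented above. "stats" is an
     * assumed EnvironmentStats instance.
     *
     *   long totalRuns   = stats.getNCleanerRuns();       // includes two-pass
     *   long twoPassRuns = stats.getNCleanerTwoPassRuns();
     *   long revisalRuns = stats.getNCleanerRevisalRuns(); // counted separately
     *   long singlePassRuns = totalRuns - twoPassRuns;
     */

    /**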

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TWO_PASS_RUNS_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TWO_PASS_RUNS_NAME}

* * @see Cleaner Statistics: TTL and expired data * @since 6.5.0 */ public long getNCleanerTwoPassRuns() { return cleanerStats.getLong(CLEANER_TWO_PASS_RUNS); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REVISAL_RUNS_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_REVISAL_RUNS_NAME}

* * @see Cleaner Statistics: TTL and expired data * @since 6.5.0 */ public long getNCleanerRevisalRuns() { return cleanerStats.getLong(CLEANER_REVISAL_RUNS); } /** * @deprecated in JE 6.3, always returns zero. */ public long getNCleanerProbeRuns() { return 0; } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DELETIONS_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DELETIONS_NAME}

* * @see Cleaning Statistics: Disk Space * Management */ public long getNCleanerDeletions() { return cleanerStats.getLong(CLEANER_DELETIONS); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LN_QUEUE_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LN_QUEUE_SIZE_NAME}

* * @see Cleaning Statistics: Pending LNs and * DBs */ public int getPendingLNQueueSize() { return cleanerStats.getInt(CLEANER_PENDING_LN_QUEUE_SIZE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DB_QUEUE_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DB_QUEUE_SIZE_NAME}

* * @see Cleaning Statistics: Pending LNs and * DBs */ public int getPendingDBQueueSize() { return cleanerStats.getInt(CLEANER_PENDING_DB_QUEUE_SIZE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DISK_READS_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_DISK_READS_NAME}

* * @see Cleaning Efficiency */ public long getNCleanerDiskRead() { return cleanerStats.getLong(CLEANER_DISK_READS); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ENTRIES_READ_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ENTRIES_READ_NAME}

     * @see Cleaning Statistics: Processing Details
     * @see Cleaning Efficiency
     */
    public long getNCleanerEntriesRead() {
        return cleanerStats.getLong(CLEANER_ENTRIES_READ);
    }
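    /*
     * Illustrative sketch (not in the original source): a rough per-run
     * cleaning efficiency figure, relating entries read and disk reads to
     * completed runs. "stats" is an assumed EnvironmentStats instance.
     *
     *   long runs = stats.getNCleanerRuns();
     *   if (runs > 0) {
     *       long entriesPerRun   = stats.getNCleanerEntriesRead() / runs;
     *       long diskReadsPerRun = stats.getNCleanerDiskRead() / runs;
     *       System.out.println("entries/run=" + entriesPerRun +
     *                          " diskReads/run=" + diskReadsPerRun);
     *   }
     */

    /**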

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_OBSOLETE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_OBSOLETE_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNINsObsolete() { return cleanerStats.getLong(CLEANER_INS_OBSOLETE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_CLEANED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_CLEANED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNINsCleaned() { return cleanerStats.getLong(CLEANER_INS_CLEANED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_DEAD_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_DEAD_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNINsDead() { return cleanerStats.getLong(CLEANER_INS_DEAD); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_MIGRATED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_INS_MIGRATED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNINsMigrated() { return cleanerStats.getLong(CLEANER_INS_MIGRATED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_OBSOLETE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_OBSOLETE_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNBINDeltasObsolete() { return cleanerStats.getLong(CLEANER_BIN_DELTAS_OBSOLETE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_CLEANED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_CLEANED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNBINDeltasCleaned() { return cleanerStats.getLong(CLEANER_BIN_DELTAS_CLEANED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_DEAD_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_DEAD_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNBINDeltasDead() { return cleanerStats.getLong(CLEANER_BIN_DELTAS_DEAD); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_MIGRATED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_BIN_DELTAS_MIGRATED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNBINDeltasMigrated() { return cleanerStats.getLong(CLEANER_BIN_DELTAS_MIGRATED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_OBSOLETE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_OBSOLETE_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNsObsolete() { return cleanerStats.getLong(CLEANER_LNS_OBSOLETE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXPIRED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXPIRED_NAME}

     * <p>This total does not include embedded LNs.</p>
     *
     * @see Cleaning Statistics: Processing Details
     */
    public long getNLNsExpired() {
        return cleanerStats.getLong(CLEANER_LNS_EXPIRED);
    }

    /**

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXTINCT_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_EXTINCT_NAME}

     * <p>This total does not include embedded LNs.</p>
     *
     * @see ExtinctionFilter
     * @see Cleaning Statistics: Processing Details
     */
    public long getNLNsExtinct() {
        return cleanerStats.getLong(CLEANER_LNS_EXTINCT);
    }

    /**

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_CLEANED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_CLEANED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNsCleaned() { return cleanerStats.getLong(CLEANER_LNS_CLEANED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_DEAD_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_DEAD_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNsDead() { return cleanerStats.getLong(CLEANER_LNS_DEAD); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_LOCKED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_LOCKED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNsLocked() { return cleanerStats.getLong(CLEANER_LNS_LOCKED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MIGRATED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MIGRATED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNsMigrated() { return cleanerStats.getLong(CLEANER_LNS_MIGRATED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MARKED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNS_MARKED_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNsMarked() { return cleanerStats.getLong(CLEANER_LNS_MARKED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNQUEUE_HITS_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_LNQUEUE_HITS_NAME}

* * @see Cleaning Statistics: Processing * Details */ public long getNLNQueueHits() { return cleanerStats.getLong(CLEANER_LNQUEUE_HITS); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_PROCESSED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_PROCESSED_NAME}

* * @see Cleaning Statistics: Pending LNs and * DBs */ public long getNPendingLNsProcessed() { return cleanerStats.getLong(CLEANER_PENDING_LNS_PROCESSED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_LOCKED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_LNS_LOCKED_NAME}

* * @see Cleaning Statistics: Pending LNs and * DBs */ public long getNPendingLNsLocked() { return cleanerStats.getLong(CLEANER_PENDING_LNS_LOCKED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DBS_PROCESSED_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DBS_PROCESSED_NAME}

* * @see Cleaning Statistics: Pending LNs and * DBs */ public long getNPendingDBsProcessed() { return cleanerStats.getLong(CLEANER_PENDING_DBS_PROCESSED); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DBS_INCOMPLETE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PENDING_DBS_INCOMPLETE_NAME}

* * @see Cleaning Statistics: Pending LNs and * DBs */ public long getNPendingDBsIncomplete() { return cleanerStats.getLong(CLEANER_PENDING_DBS_INCOMPLETE); } /** * @deprecated always returns zero. */ public long getNMarkedLNsProcessed() { return 0; } /** * @deprecated always returns zero. */ public long getNToBeCleanedLNsProcessed() { return 0; } /** * @deprecated always returns zero. */ public long getNClusterLNsProcessed() { return 0; } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ACTIVE_LOG_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_ACTIVE_LOG_SIZE_NAME}

* *

The {@link #getCurrentMinUtilization() log utilization} is the * percentage of activeLogSize that is currently referenced or active.

* * @see Cleaning Statistics: Disk Space * Management * @since 7.5 */ public long getActiveLogSize() { return cleanerStats.getLong(CLEANER_ACTIVE_LOG_SIZE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RESERVED_LOG_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_RESERVED_LOG_SIZE_NAME}

* *

Deletion of reserved files may be postponed for several reasons. * This occurs if an active file is protected (by a backup, for example), * and then the file is cleaned and becomes a reserved file. See * {@link #getProtectedLogSizeMap()} for more information. In a * standalone JE environment, reserved files are normally deleted very * soon after being cleaned.

* *

In an HA environment, reserved files are retained because they might * be used for replication to electable nodes that have been offline * for the {@link com.sleepycat.je.rep.ReplicationConfig#FEEDER_TIMEOUT} * interval or longer, or to offline secondary nodes. The replication * stream position of these nodes is unknown, so whether these files could * be used to avoid a network restore, when bringing these nodes online, * is also unknown. The files are retained just in case they can be used * for such replication. Files are reserved for replication on both master * and replicas, since a replica may become a master at a future time. * Such files will be deleted (oldest file first) to make room for a * write operation, if the write operation would have caused a disk limit * to be violated.

* *

In NoSQL DB, this retention of reserved files has the additional * benefit of supplying the replication stream to subscribers of the * Stream API, when such subscribers need to replay the stream from an * earlier point in time.

* * @see Cleaning Statistics: Disk Space * Management * @since 7.5 */ public long getReservedLogSize() { return cleanerStats.getLong(CLEANER_RESERVED_LOG_SIZE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_NAME}

* *

Reserved files are protected for reasons described by {@link * #getProtectedLogSizeMap()}.

* * @see Cleaning Statistics: Disk Space * Management * @since 7.5 */ public long getProtectedLogSize() { return cleanerStats.getLong(CLEANER_PROTECTED_LOG_SIZE); } /** *

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_MAP_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_PROTECTED_LOG_SIZE_MAP_NAME}

* *

     * <p>{@link #getReservedLogSize() Reserved} data files are temporarily
     * {@link #getProtectedLogSize() protected} for a number of reasons. The
     * keys in the protected log size map are the names of the protecting
     * entities, and the values are the number of bytes protected by each
     * entity. The type and format of the entity names are as follows:</p>
     *
     * <pre>
     *    Backup-N
     *    DatabaseCount-N
     *    DiskOrderedCursor-N
     *    Syncup-N
     *    Feeder-N
     *    NetworkRestore-N
     * </pre>
     *
     * <p>Where:</p>
     *
     * <ul>
     * <li>{@code Backup-N} represents a {@link DbBackup} in progress,
     * i.e., for which {@link DbBackup#startBackup()} has been called and
     * {@link DbBackup#endBackup()} has not yet been called. All active files
     * are initially protected by the backup, but these are not reserved
     * files and only appear in the map if they are cleaned and become
     * reserved after the backup starts. Files are not protected if they have
     * been copied and {@link DbBackup#removeFileProtection(String)} has been
     * called. {@code N} is a sequentially assigned integer.</li>
     *
     * <li>{@code DatabaseCount-N} represents an outstanding call to
     * {@link Database#count()}. All active files are initially protected by
     * this method, but these are not reserved files and only appear in the
     * map if they are cleaned and become reserved during the execution of
     * {@code Database.count}. {@code N} is a sequentially assigned
     * integer.</li>
     *
     * <li>{@code DiskOrderedCursor-N} represents a {@link DiskOrderedCursor}
     * that has not yet been closed by {@link DiskOrderedCursor#close()}.
     * All active files are initially protected when the cursor is opened,
     * but these are not reserved files and only appear in the map if they
     * are cleaned and become reserved while the cursor is open. {@code N} is
     * a sequentially assigned integer.</li>
     *
     * <li>{@code Syncup-N} represents an in-progress negotiation between a
     * master and replica node in an HA replication group to establish a
     * replication stream. This is normally a very short negotiation and
     * occurs when a replica joins the group or after an election is held.
     * During syncup, all reserved files are protected. {@code N} is the node
     * name of the other node involved in the syncup, i.e., if this node is a
     * master then it is the name of the replica, and vice versa.</li>
     *
     * <li>{@code Feeder-N} represents an HA master node that is supplying
     * the replication stream to a replica. Normally data in active files is
     * being supplied and this data is not in the reserved or protected
     * categories. But if the replica is lagging, data from reserved files
     * may be supplied, and in that case will be protected and appear in the
     * map. {@code N} is the node name of the replica receiving the
     * replication stream.</li>
     *
     * <li>{@code NetworkRestore-N} represents an HA replica or master node
     * that is supplying files to a node that is performing a
     * {@link com.sleepycat.je.rep.NetworkRestore}. The files supplied are
     * all active files plus the two most recently written reserved files.
     * The two reserved files will appear in the map, as well as any of the
     * active files that were cleaned and became reserved during the network
     * restore. Files that have already been copied by the network restore
     * are not protected. {@code N} is the name of the node performing the
     * {@link com.sleepycat.je.rep.NetworkRestore}.</li>
     * </ul>

When more than one entity is included in the map, in general the * largest value points to the entity primarily responsible for * preventing reclamation of disk space. Note that the values normally * sum to more than {@link #getProtectedLogSize()}, since protection often * overlaps.

* *

The string format of this stat consists of {@code name=size} pairs * separated by semicolons, where name is the entity name described * above and size is the number of protected bytes.

     * @see Cleaning Statistics: Disk Space Management
     * @since 7.5
     */
    public SortedMap<String, Long> getProtectedLogSizeMap() {
        return cleanerStats.getMap(CLEANER_PROTECTED_LOG_SIZE_MAP);
    }
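    /*
     * Illustrative sketch (not in the original source): finding the entity
     * that protects the most bytes, per the map format described above.
     * "stats" is an assumed EnvironmentStats instance; entity names shown
     * in the comment are hypothetical examples.
     *
     *   SortedMap<String, Long> map = stats.getProtectedLogSizeMap();
     *   String topEntity = null;
     *   long topBytes = 0;
     *   for (Map.Entry<String, Long> e : map.entrySet()) {
     *       if (e.getValue() > topBytes) {
     *           topEntity = e.getKey();   // e.g. "Backup-1" or "Feeder-node2"
     *           topBytes = e.getValue();
     *       }
     *   }
     */

    /**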

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_AVAILABLE_LOG_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_AVAILABLE_LOG_SIZE_NAME}

* *

This is the amount that can be logged by write operations, and * other JE activity such as checkpointing, without violating a disk * limit. The files making up {@code reservedLogSize} can be deleted to * make room for these write operations, so {@code availableLogSize} is * the sum of the current disk free space and the reserved size that is not * protected ({@code reservedLogSize} - {@code protectedLogSize}). The * current disk free space is calculated using the disk volume's free * space, {@link EnvironmentConfig#MAX_DISK} and {@link * EnvironmentConfig#FREE_DISK}.

* *

Note that when a record is written, the number of bytes includes JE * overheads for the record. Also, this causes Btree metadata to be * written during checkpoints, and other metadata is also written by JE. * So the space occupied on disk by a given set of records cannot be * calculated by simply summing the key/data sizes.

* *

Also note that {@code availableLogSize} will be negative when a disk * limit has been violated, representing the amount that needs to be freed * before write operations are allowed.

     * @see Cleaning Statistics: Disk Space Management
     * @see EnvironmentConfig#MAX_DISK
     * @see EnvironmentConfig#FREE_DISK
     * @since 7.5
     */
    public long getAvailableLogSize() {
        return cleanerStats.getLong(CLEANER_AVAILABLE_LOG_SIZE);
    }
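    /*
     * Illustrative sketch (not in the original source): a disk-budget check
     * based on the relationship described above, where availableLogSize is
     * free space plus the reserved size that is not protected. "stats" is
     * an assumed EnvironmentStats instance; the 10% threshold is arbitrary.
     *
     *   long available   = stats.getAvailableLogSize();
     *   long total       = stats.getTotalLogSize();
     *   long reclaimable = stats.getReservedLogSize() -
     *                      stats.getProtectedLogSize();
     *   if (available < 0) {
     *       // A disk limit has been violated; writes are being rejected.
     *   } else if (available < total / 10) {
     *       // Low headroom: check why reclaimable space is small.
     *   }
     */

    /**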

{@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TOTAL_LOG_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.cleaner.CleanerStatDefinition#CLEANER_TOTAL_LOG_SIZE_NAME}

* * @see Cleaning Statistics: Disk Space * Management */ public long getTotalLogSize() { return cleanerStats.getLong(CLEANER_TOTAL_LOG_SIZE); } /* LogManager stats. */ /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LBFP_MISS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LBFP_MISS_NAME}

* * @see I/O Statistics: Log Buffers */ public long getNCacheMiss() { return logStats.getAtomicLong(LBFP_MISS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_END_OF_LOG_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_END_OF_LOG_NAME}

* *

Note that the log entries prior to this position may not yet have * been flushed to disk. Flushing can be forced using a Sync or * WriteNoSync commit, or a checkpoint.

     * @see I/O Statistics: Logging Critical Section
     */
    public long getEndOfLog() {
        return logStats.getLong(LOGMGR_END_OF_LOG);
    }
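    /*
     * Illustrative sketch (not in the original source): forcing entries up
     * to the end-of-log position to disk using one of the mechanisms noted
     * above (a Sync commit). "env" and the write operations are assumed.
     *
     *   TransactionConfig txnConfig = new TransactionConfig();
     *   txnConfig.setDurability(Durability.COMMIT_SYNC);
     *   Transaction txn = env.beginTransaction(null, txnConfig);
     *   // ... write operations ...
     *   txn.commit();   // fsyncs, making entries before getEndOfLog() durable
     */

    /**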

{@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_FSYNCS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_FSYNCS_NAME}

* * @see I/O Statistics: Fsync and Group Commit */ public long getNFSyncs() { return logStats.getAtomicLong(FSYNCMGR_FSYNCS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_FSYNC_REQUESTS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_FSYNC_REQUESTS_NAME}

* * @see I/O Statistics: Fsync and Group Commit */ public long getNFSyncRequests() { return logStats.getLong(FSYNCMGR_FSYNC_REQUESTS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_TIMEOUTS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_TIMEOUTS_NAME}

* * @see I/O Statistics: Fsync and Group Commit */ public long getNFSyncTimeouts() { return logStats.getLong(FSYNCMGR_TIMEOUTS); } /** * @deprecated in 18.3, always 0. Use {@link #getFSyncAvgMs} to get an * estimate of fsync times. */ public long getFSyncTime() { return 0; } /** * @deprecated in 18.3, always 0. Use {@link #getFSyncMaxMs} instead. */ public long getFSyncMaxTime() { return 0; } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_AVG_MS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_AVG_MS_NAME}

* * @see I/O Statistics: Fsync and Group Commit * * @since 18.3 */ public long getFSyncAvgMs() { return logStats.getLong(FILEMGR_FSYNC_AVG_MS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_95_MS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_95_MS_NAME}

* * @see I/O Statistics: Fsync and Group Commit * * @since 18.3 */ public long getFSync95Ms() { return logStats.getLong(FILEMGR_FSYNC_95_MS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_99_MS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_99_MS_NAME}

* * @see I/O Statistics: Fsync and Group Commit * * @since 18.3 */ public long getFSync99Ms() { return logStats.getLong(FILEMGR_FSYNC_99_MS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_MAX_MS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FSYNC_MAX_MS_NAME}

     * @see I/O Statistics: Fsync and Group Commit
     *
     * @since 18.3
     */
    public long getFSyncMaxMs() {
        return logStats.getLong(FILEMGR_FSYNC_MAX_MS);
    }
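    /*
     * Illustrative sketch (not in the original source): logging the fsync
     * latency distribution exposed by the four getters above. "stats" is an
     * assumed EnvironmentStats instance.
     *
     *   System.out.printf("fsync ms avg=%d p95=%d p99=%d max=%d%n",
     *                     stats.getFSyncAvgMs(),
     *                     stats.getFSync95Ms(),
     *                     stats.getFSync99Ms(),
     *                     stats.getFSyncMaxMs());
     */

    /**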

{@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_N_GROUP_COMMIT_REQUESTS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FSYNCMGR_N_GROUP_COMMIT_REQUESTS_NAME}

* * @see I/O Statistics: Fsync and Group Commit * * @since 18.1, although the stat was output by {@link #toString} and * appeared in the je.stat.csv file in earlier versions. */ public long getNGroupCommitRequests() { return logStats.getLong(FSYNCMGR_N_GROUP_COMMIT_REQUESTS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_LOG_FSYNCS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_LOG_FSYNCS_NAME}

* * @see I/O Statistics: Fsync and Group Commit */ public long getNLogFSyncs() { return logStats.getLong(FILEMGR_LOG_FSYNCS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LBFP_LOG_BUFFERS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LBFP_LOG_BUFFERS_NAME}

* * @see I/O Statistics: Log Buffers */ public int getNLogBuffers() { return logStats.getInt(LBFP_LOG_BUFFERS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_READS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_READS_NAME}

* * @see I/O Statistics: File Access */ public long getNRandomReads() { return logStats.getLong(FILEMGR_RANDOM_READS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_READ_BYTES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_READ_BYTES_NAME}

* * @see I/O Statistics: File Access */ public long getNRandomReadBytes() { return logStats.getLong(FILEMGR_RANDOM_READ_BYTES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_WRITES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_WRITES_NAME}

* * @see I/O Statistics: File Access */ public long getNRandomWrites() { return logStats.getLong(FILEMGR_RANDOM_WRITES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_WRITE_BYTES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_RANDOM_WRITE_BYTES_NAME}

* * @see I/O Statistics: File Access */ public long getNRandomWriteBytes() { return logStats.getLong(FILEMGR_RANDOM_WRITE_BYTES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_READS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_READS_NAME}

* * @see I/O Statistics: File Access */ public long getNSequentialReads() { return logStats.getLong(FILEMGR_SEQUENTIAL_READS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_READ_BYTES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_READ_BYTES_NAME}

* * @see I/O Statistics: File Access */ public long getNSequentialReadBytes() { return logStats.getLong(FILEMGR_SEQUENTIAL_READ_BYTES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_WRITES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_WRITES_NAME}

* * @see I/O Statistics: File Access */ public long getNSequentialWrites() { return logStats.getLong(FILEMGR_SEQUENTIAL_WRITES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_WRITE_BYTES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_SEQUENTIAL_WRITE_BYTES_NAME}

     * @see I/O Statistics: File Access
     */
    public long getNSequentialWriteBytes() {
        return logStats.getLong(FILEMGR_SEQUENTIAL_WRITE_BYTES);
    }
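    /*
     * Illustrative sketch (not in the original source): average I/O sizes
     * derived from the random/sequential counters above. "stats" is an
     * assumed EnvironmentStats instance.
     *
     *   long randReads = stats.getNRandomReads();
     *   long seqWrites = stats.getNSequentialWrites();
     *   long avgRandomReadSize = randReads > 0
     *       ? stats.getNRandomReadBytes() / randReads : 0;
     *   long avgSeqWriteSize = seqWrites > 0
     *       ? stats.getNSequentialWriteBytes() / seqWrites : 0;
     */

    /**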

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_BYTES_READ_FROM_WRITEQUEUE_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_BYTES_READ_FROM_WRITEQUEUE_NAME}

* * @see I/O Statistics: The Write Queue */ public long getNBytesReadFromWriteQueue() { return logStats.getLong(FILEMGR_BYTES_READ_FROM_WRITEQUEUE); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE_NAME}

* * @see I/O Statistics: The Write Queue */ public long getNBytesWrittenFromWriteQueue() { return logStats.getLong(FILEMGR_BYTES_WRITTEN_FROM_WRITEQUEUE); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_READS_FROM_WRITEQUEUE_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_READS_FROM_WRITEQUEUE_NAME}

* * @see I/O Statistics: The Write Queue */ public long getNReadsFromWriteQueue() { return logStats.getLong(FILEMGR_READS_FROM_WRITEQUEUE); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITES_FROM_WRITEQUEUE_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITES_FROM_WRITEQUEUE_NAME}

* * @see I/O Statistics: The Write Queue */ public long getNWritesFromWriteQueue() { return logStats.getLong(FILEMGR_WRITES_FROM_WRITEQUEUE); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITEQUEUE_OVERFLOW_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITEQUEUE_OVERFLOW_NAME}

* * @see I/O Statistics: The Write Queue */ public long getNWriteQueueOverflow() { return logStats.getLong(FILEMGR_WRITEQUEUE_OVERFLOW); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES_NAME}

* * @see I/O Statistics: The Write Queue */ public long getNWriteQueueOverflowFailures() { return logStats.getLong(FILEMGR_WRITEQUEUE_OVERFLOW_FAILURES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LBFP_BUFFER_BYTES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LBFP_BUFFER_BYTES_NAME}

* *

If this environment uses the shared cache, this method returns * only the amount used by this environment.

* * @see I/O Statistics: Log Buffers */ public long getBufferBytes() { return logStats.getLong(LBFP_BUFFER_BYTES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LBFP_NOT_RESIDENT_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LBFP_NOT_RESIDENT_NAME}

* * @see I/O Statistics: Log Buffers */ public long getNNotResident() { return logStats.getAtomicLong(LBFP_NOT_RESIDENT); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_REPEAT_FAULT_READS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_REPEAT_FAULT_READS_NAME}

* * @see I/O Statistics: File Access */ public long getNRepeatFaultReads() { return logStats.getLong(LOGMGR_REPEAT_FAULT_READS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_REPEAT_ITERATOR_READS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_REPEAT_ITERATOR_READS_NAME}

* *

     * <p>This happens during scans of the log during activities like
     * environment open (recovery) or log cleaning. The repeat iterator reads
     * can be reduced by increasing
     * {@link EnvironmentConfig#LOG_ITERATOR_MAX_SIZE}.</p>

* * @see I/O Statistics: File Access */ public long getNRepeatIteratorReads() { return logStats.getLong(LOGMGR_REPEAT_ITERATOR_READS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_TEMP_BUFFER_WRITES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LOGMGR_TEMP_BUFFER_WRITES_NAME}

* * @see I/O Statistics: Log Buffers */ public long getNTempBufferWrites() { return logStats.getLong(LOGMGR_TEMP_BUFFER_WRITES); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#LBFP_NO_FREE_BUFFER_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#LBFP_NO_FREE_BUFFER_NAME}

* * @see I/O Statistics: Log Buffers * * @since 18.1, although the stat was output by {@link #toString} and * appeared in the je.stat.csv file in earlier versions. */ public long getNNoFreeBuffer() { return logStats.getLong(LBFP_NO_FREE_BUFFER); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FILE_OPENS_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_FILE_OPENS_NAME}

* * @see I/O Statistics: File Access */ public int getNFileOpens() { return logStats.getInt(FILEMGR_FILE_OPENS); } /** *

{@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_OPEN_FILES_DESC}

* *

Group: {@value * com.sleepycat.je.log.LogStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.log.LogStatDefinition#FILEMGR_OPEN_FILES_NAME}

* * @see I/O Statistics: File Access */ public int getNOpenFiles() { return logStats.getInt(FILEMGR_OPEN_FILES); } /* Return Evictor stats. */ /** * @deprecated The method returns 0 always. */ public long getRequiredEvictBytes() { return 0; } /** * @deprecated This statistic has no meaning after the implementation * of the new evictor in JE 6.0. The method returns 0 always. */ public long getNNodesScanned() { return 0; } /** * @deprecated Use {@link #getNEvictionRuns()} instead. */ public long getNEvictPasses() { return cacheStats.getLong(EVICTOR_EVICTION_RUNS); } /** * @deprecated use {@link #getNNodesTargeted()} instead. */ public long getNNodesSelected() { return cacheStats.getLong(EVICTOR_NODES_TARGETED); } /** * @deprecated Use {@link #getNNodesEvicted()} instead. */ public long getNNodesExplicitlyEvicted() { return cacheStats.getLong(EVICTOR_NODES_EVICTED); } /** * @deprecated Use {@link #getNNodesStripped()} instead. */ public long getNBINsStripped() { return cacheStats.getLong(EVICTOR_NODES_STRIPPED); } /** * @deprecated Use {@link #getNNodesMutated()} instead. */ public long getNBINsMutated() { return cacheStats.getLong(EVICTOR_NODES_MUTATED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_EVICTION_RUNS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_EVICTION_RUNS_NAME}

* *

When an evictor thread is awoken it performs eviction until * {@link #getCacheTotalBytes()} is at least * {@link EnvironmentConfig#EVICTOR_EVICT_BYTES} less than the * {@link EnvironmentConfig#MAX_MEMORY_PERCENT total cache size}. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics */ public long getNEvictionRuns() { return cacheStats.getLong(EVICTOR_EVICTION_RUNS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_TARGETED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_TARGETED_NAME}

* *

An eviction target may actually be evicted, or skipped, or put back * to the LRU, potentially after partial eviction (stripping) or * BIN-delta mutation is done on it. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics: Debugging */ public long getNNodesTargeted() { return cacheStats.getLong(EVICTOR_NODES_TARGETED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_EVICTED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_EVICTED_NAME}

* *

Does not include {@link #getNLNsEvicted() LN eviction} or * {@link #getNNodesMutated() BIN-delta mutation}. * Includes eviction of {@link #getNDirtyNodesEvicted() dirty nodes} and * {@link #getNRootNodesEvicted() root nodes}. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics: Eviction */ public long getNNodesEvicted() { return cacheStats.getLong(EVICTOR_NODES_EVICTED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_ROOT_NODES_EVICTED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_ROOT_NODES_EVICTED_NAME}

* *

The root node of a Database is only evicted after all other nodes in * the Database, so this implies that the entire Database has fallen out of * cache and is probably closed. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics: Debugging */ public long getNRootNodesEvicted() { return cacheStats.getLong(EVICTOR_ROOT_NODES_EVICTED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_DIRTY_NODES_EVICTED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_DIRTY_NODES_EVICTED_NAME}

* *

When a dirty IN is evicted from main cache and no off-heap cache is * configured, the IN must be logged. When an off-heap cache is configured, * dirty INs can be moved from main cache to off-heap cache based on LRU, * but INs are only logged when they are evicted from off-heap cache. * Therefore, this stat is always zero when an off-heap cache is configured. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics: Eviction */ public long getNDirtyNodesEvicted() { return cacheStats.getLong(EVICTOR_DIRTY_NODES_EVICTED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_LNS_EVICTED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_LNS_EVICTED_NAME}

* *

When a BIN is considered for eviction based on LRU, if the BIN * contains resident LNs in main cache, it is stripped of the LNs rather * than being evicted. This stat reflects LNs evicted in this manner, but * not LNs evicted as a result of using {@link CacheMode#EVICT_LN}. Also * note that {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded} LNs * are evicted immediately and are not reflected in this stat value. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics: Eviction */ public long getNLNsEvicted() { return cacheStats.getLong(EVICTOR_LNS_EVICTED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_STRIPPED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_STRIPPED_NAME}

* *

If space can be reclaimed by stripping a BIN, this prevents mutating * the BIN to a BIN-delta or evicting the BIN entirely. * See {@link CacheMode} for a description of eviction.

* * @see Cache Statistics: Debugging */ public long getNNodesStripped() { return cacheStats.getLong(EVICTOR_NODES_STRIPPED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MUTATED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MUTATED_NAME}

* *

When a BIN is considered for eviction based on LRU, if the BIN * can be mutated to a BIN-delta, it is mutated rather than being evicted. * Note that when an off-heap cache is configured, this stat value will be * zero because BIN mutation will take place only in the off-heap cache; * see {@link #getOffHeapNodesMutated()}. * See {@link CacheMode} for a description of eviction.

     * @see Cache Statistics: Eviction
     */
    public long getNNodesMutated() {
        return cacheStats.getLong(EVICTOR_NODES_MUTATED);
    }
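    /*
     * Illustrative sketch (not in the original source): a breakdown of what
     * happened to eviction targets, using the counters above plus the
     * put-back and skipped counters defined below. "stats" is an assumed
     * EnvironmentStats instance.
     *
     *   long targeted = stats.getNNodesTargeted();
     *   long evicted  = stats.getNNodesEvicted();
     *   long stripped = stats.getNNodesStripped();
     *   long mutated  = stats.getNNodesMutated();
     *   long putBack  = stats.getNNodesPutBack();
     *   long skipped  = stats.getNNodesSkipped();
     *   // Per the docs, a target is evicted, skipped, or put back,
     *   // possibly after stripping or BIN-delta mutation.
     */

    /**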

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_PUT_BACK_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_PUT_BACK_NAME}

* *

     * <p>Reasons for putting back a target IN are:</p>
     *
     * <ul>
     * <li>The IN was accessed by an operation while the evictor was
     * processing it.</li>
     * <li>To prevent the cache usage for Btree objects from falling below
     * {@link EnvironmentConfig#TREE_MIN_MEMORY}.</li>
     * </ul>
     *
     * <p>See {@link CacheMode} for a description of eviction.</p>

* * @see Cache Statistics: Debugging */ public long getNNodesPutBack() { return cacheStats.getLong(EVICTOR_NODES_PUT_BACK); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MOVED_TO_PRI2_LRU_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_MOVED_TO_PRI2_LRU_NAME}

* *

     * <p>When an off-heap cache is not configured, dirty nodes are evicted
     * last from the main cache by moving them to a 2nd priority LRU list.
     * When an off-heap cache is configured, level-2 INs that reference
     * off-heap BINs are evicted last from the main cache, using the same
     * approach. See {@link CacheMode} for a description of eviction.</p>

* * @see Cache Statistics: Debugging */ public long getNNodesMovedToDirtyLRU() { return cacheStats.getLong(EVICTOR_NODES_MOVED_TO_PRI2_LRU); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_SKIPPED_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_NODES_SKIPPED_NAME}

* *

     * <p>Reasons for skipping a target IN are:</p>
     *
     * <ul>
     * <li>It has already been evicted by another thread.</li>
     * <li>It cannot be evicted because concurrent activity added resident
     * child nodes.</li>
     * <li>It cannot be evicted because it is dirty and the environment is
     * read-only.</li>
     * </ul>
     *
     * <p>See {@link CacheMode} for a description of eviction.</p>

* * @see Cache Statistics: Debugging */ public long getNNodesSkipped() { return cacheStats.getLong(EVICTOR_NODES_SKIPPED); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#THREAD_UNAVAILABLE_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#THREAD_UNAVAILABLE_NAME}

* * @see Cache Statistics: Critical * Eviction */ public long getNThreadUnavailable() { return cacheStats.getAtomicLong(THREAD_UNAVAILABLE); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_SHARED_CACHE_ENVS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#EVICTOR_SHARED_CACHE_ENVS_NAME}

* *

This method says nothing about whether this environment is using * the shared cache or not.

* */ public int getNSharedCacheEnvironments() { return cacheStats.getInt(EVICTOR_SHARED_CACHE_ENVS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_NAME}

* *

Note that the number of LN fetches does not necessarily correspond * to the number of records accessed, since some LNs may be * {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}.

* * @see Cache Statistics: Sizing */ public long getNLNsFetch() { return cacheStats.getAtomicLong(LN_FETCH); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_NAME}

* * @see Cache Statistics: Sizing */ public long getNBINsFetch() { return cacheStats.getAtomicLong(BIN_FETCH); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_NAME}

* * @see Cache Statistics: Sizing */ public long getNUpperINsFetch() { return cacheStats.getAtomicLong(UPPER_IN_FETCH); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_MISS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#LN_FETCH_MISS_NAME}

* *

Note that the number of LN fetches does not necessarily correspond * to the number of records accessed, since some LNs may be * {@link EnvironmentConfig#TREE_MAX_EMBEDDED_LN embedded}.

     * @see Cache Statistics: Sizing
     */
    public long getNLNsFetchMiss() {
        return cacheStats.getAtomicLong(LN_FETCH_MISS);
    }
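    /*
     * Illustrative sketch (not in the original source): an LN cache hit
     * ratio from the fetch counters above. As noted, embedded LNs are not
     * fetched, so this reflects non-embedded records only. "stats" is an
     * assumed EnvironmentStats instance.
     *
     *   long fetches = stats.getNLNsFetch();
     *   long misses  = stats.getNLNsFetchMiss();
     *   double hitRatio = fetches > 0
     *       ? 1.0 - ((double) misses / fetches) : 1.0;
     */

    /**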

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_NAME}

* *

This is the portion of {@link #getNBINsFetch()} that resulted in a * fetch miss. The fetch may be for a full BIN or BIN-delta * ({@link #getNBINDeltasFetchMiss()}), depending on whether a BIN-delta * currently exists (see {@link EnvironmentConfig#TREE_BIN_DELTA}). * However, additional full BIN fetches occur when mutating a BIN-delta to * a full BIN ({@link #getNFullBINsMiss()}) whenever this is necessary for * completing an operation.

* *

Therefore, the total number of BIN fetch misses * (including BIN-deltas) is:

* *

{@code nFullBINsMiss + nBINsFetchMiss}

* *

And the total number of full BIN (vs BIN-delta) fetch misses is:

* *

     * <p>{@code nFullBINsMiss + nBINsFetchMiss - nBINDeltasFetchMiss}</p>

     * @see Cache Statistics: Sizing
     */
    public long getNBINsFetchMiss() {
        return cacheStats.getAtomicLong(BIN_FETCH_MISS);
    }
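    /*
     * Illustrative sketch (not in the original source): the two totals given
     * by the formulas above, computed from the corresponding getters.
     * "stats" is an assumed EnvironmentStats instance.
     *
     *   long totalBinMisses =             // full BINs and BIN-deltas
     *       stats.getNFullBINsMiss() + stats.getNBINsFetchMiss();
     *   long fullBinMisses =              // full BINs only
     *       stats.getNFullBINsMiss() + stats.getNBINsFetchMiss()
     *       - stats.getNBINDeltasFetchMiss();
     */

    /**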

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_FETCH_MISS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_FETCH_MISS_NAME}

* *

This represents the portion of {@code nBINsFetchMiss()} that fetched * BIN-deltas rather than full BINs. See {@link #getNBINsFetchMiss()}.

* * @see Cache Statistics: Sizing */ public long getNBINDeltasFetchMiss() { return cacheStats.getAtomicLong(BIN_DELTA_FETCH_MISS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#FULL_BIN_MISS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#FULL_BIN_MISS_NAME}

* *

     * <p>Note that this stat does not include full BIN misses that are not
     * due to BIN-delta mutations. See {@link #getNBINsFetchMiss()}.</p>

* * @see Cache Statistics: Sizing */ public long getNFullBINsMiss() { return cacheStats.getAtomicLong(FULL_BIN_MISS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_MISS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#UPPER_IN_FETCH_MISS_NAME}

* * @see Cache Statistics: Sizing */ public long getNUpperINsFetchMiss() { return cacheStats.getAtomicLong(UPPER_IN_FETCH_MISS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_RATIO_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_FETCH_MISS_RATIO_NAME}

* *

     * <p>This stat can be misleading because it does not include the number
     * of full BIN fetch misses resulting from BIN-delta mutations
     * ({@link #getNFullBINsMiss()}). It may be improved, or perhaps
     * deprecated, in a future release.</p>

* * @see Cache Statistics: Debugging */ public float getNBINsFetchMissRatio() { return cacheStats.getFloat(BIN_FETCH_MISS_RATIO); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_BLIND_OPS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#BIN_DELTA_BLIND_OPS_NAME}

* *

Note that this stat is misplaced. It should be in the * {@value com.sleepycat.je.dbi.DbiStatDefinition#ENV_GROUP_NAME} group * and will probably be moved there in a future release.

* * @see Cache Statistics: Debugging * @see EnvironmentConfig#TREE_BIN_DELTA */ public long getNBINDeltaBlindOps() { return cacheStats.getAtomicLong(BIN_DELTA_BLIND_OPS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_UPPER_INS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_UPPER_INS_NAME} *
Zero is returned when {@link StatsConfig#setFast fast stats} * are requested and a shared cache is configured.

* * @see Cache Statistics: Sizing */ public long getNCachedUpperINs() { return cacheStats.getLong(CACHED_UPPER_INS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BINS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BINS_NAME} *
Zero is returned when {@link StatsConfig#setFast fast stats} * are requested and a shared cache is configured.

* * @see Cache Statistics: Sizing */ public long getNCachedBINs() { return cacheStats.getLong(CACHED_BINS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BIN_DELTAS_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_BIN_DELTAS_NAME} *
Zero is returned when {@link StatsConfig#setFast fast stats} * are requested and a shared cache is configured.

* * @see Cache Statistics: Sizing */ public long getNCachedBINDeltas() { return cacheStats.getLong(CACHED_BIN_DELTAS); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_SPARSE_TARGET_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_SPARSE_TARGET_NAME}

* *

Each IN contains an array of references to child INs or LNs. When * there are between one and four children resident, the size of the array * is reduced to four. This saves a significant amount of cache memory for * BINs when {@link CacheMode#EVICT_LN} is used, because there are * typically only a small number of LNs resident in main cache.

* * @see Cache Statistics: Debugging */ public long getNINSparseTarget() { return cacheStats.getLong(CACHED_IN_SPARSE_TARGET); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_NO_TARGET_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_NO_TARGET_NAME}

* *

Each IN contains an array of references to child INs or LNs. When * there are no children resident, no array is allocated. This saves a * significant amount of cache memory for BINs when {@link * CacheMode#EVICT_LN} is used, because there are typically only a small * number of LNs resident in main cache.

* * @see Cache Statistics: Debugging */ public long getNINNoTarget() { return cacheStats.getLong(CACHED_IN_NO_TARGET); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_COMPACT_KEY_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#CACHED_IN_COMPACT_KEY_NAME}

* * @see Cache Statistics: Size * Optimizations * * @see EnvironmentConfig#TREE_COMPACT_MAX_KEY_LENGTH */ public long getNINCompactKeyIN() { return cacheStats.getLong(CACHED_IN_COMPACT_KEY); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#PRI2_LRU_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#PRI2_LRU_SIZE_NAME}

* * @see Cache Statistics: Debugging * @see #getNNodesMovedToDirtyLRU() */ public long getDirtyLRUSize() { return cacheStats.getLong(PRI2_LRU_SIZE); } /** *

{@value * com.sleepycat.je.evictor.EvictorStatDefinition#PRI1_LRU_SIZE_DESC}

* *

Group: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME} *
Name: {@value * com.sleepycat.je.evictor.EvictorStatDefinition#PRI1_LRU_SIZE_NAME}

* * @see Cache Statistics: Debugging * @see #getNNodesMovedToDirtyLRU() */ public long getMixedLRUSize() { return cacheStats.getLong(PRI1_LRU_SIZE); } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBINsEvictedEvictorThread() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBINsEvictedManual() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBINsEvictedCritical() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBINsEvictedCacheMode() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBINsEvictedDaemon() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNUpperINsEvictedEvictorThread() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNUpperINsEvictedManual() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNUpperINsEvictedCritical() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNUpperINsEvictedCacheMode() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNUpperINsEvictedDaemon() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBatchesEvictorThread() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBatchesManual() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBatchesCacheMode() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBatchesCritical() { return 0; } /** * @deprecated This statistic has been removed. The method returns 0 * always. */ public long getNBatchesDaemon() { return 0; } /** *

    /**
     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_EVICTORTHREAD_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_EVICTORTHREAD_NAME}</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getNBytesEvictedEvictorThread() {
        return cacheStats.getLong(
            EvictionSource.EVICTORTHREAD.getNumBytesEvictedStatDef());
    }

    /**
     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_MANUAL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_MANUAL_NAME}</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getNBytesEvictedManual() {
        return cacheStats.getLong(
            EvictionSource.MANUAL.getNumBytesEvictedStatDef());
    }

    /**
     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CACHEMODE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CACHEMODE_NAME}</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getNBytesEvictedCacheMode() {
        return cacheStats.getLong(
            EvictionSource.CACHEMODE.getNumBytesEvictedStatDef());
    }

    /**
     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CRITICAL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_CRITICAL_NAME}</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getNBytesEvictedCritical() {
        return cacheStats.getLong(
            EvictionSource.CRITICAL.getNumBytesEvictedStatDef());
    }

    /**
     * {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_DAEMON_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.EvictorStatDefinition#N_BYTES_EVICTED_DAEMON_NAME}</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getNBytesEvictedDeamon() {
        return cacheStats.getLong(
            EvictionSource.DAEMON.getNumBytesEvictedStatDef());
    }

    /** @deprecated This statistic has been removed. The method always returns 0. */
    public long getAvgBatchEvictorThread() { return 0; }

    /** @deprecated This statistic has been removed. The method always returns 0. */
    public long getAvgBatchManual() { return 0; }

    /** @deprecated This statistic has been removed. The method always returns 0. */
    public long getAvgBatchCacheMode() { return 0; }

    /** @deprecated This statistic has been removed. The method always returns 0. */
    public long getAvgBatchCritical() { return 0; }

    /** @deprecated This statistic has been removed. The method always returns 0. */
    public long getAvgBatchDaemon() { return 0; }

    /* MemoryBudget stats. */

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_SHARED_CACHE_TOTAL_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_SHARED_CACHE_TOTAL_BYTES_NAME}</p>
     *
     * <p>If this environment uses the shared cache, this method returns the
     * total size of the shared cache, i.e., the sum of the {@link
     * #getCacheTotalBytes()} values for all environments that are sharing
     * the cache. If this environment does not use the shared cache, this
     * method returns zero.</p>
     *
     * <p>To get the configured maximum cache size, see {@link
     * EnvironmentMutableConfig#getCacheSize}.</p>
     */
    public long getSharedCacheTotalBytes() {
        return cacheStats.getLong(MB_SHARED_CACHE_TOTAL_BYTES);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_TOTAL_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_TOTAL_BYTES_NAME}</p>
     *
     * <p>This method returns the sum of {@link #getDataBytes}, {@link
     * #getAdminBytes}, {@link #getLockBytes} and {@link #getBufferBytes}.</p>
     *
     * <p>If this environment uses the shared cache, this method returns
     * only the amount used by this environment.</p>
     *
     * <p>To get the configured maximum cache size, see {@link
     * EnvironmentMutableConfig#getCacheSize}.</p>
     *
     * @see Cache Statistics: Sizing
     */
    public long getCacheTotalBytes() {
        return cacheStats.getLong(MB_TOTAL_BYTES);
    }
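
    /*
     * Editorial sketch (not part of the original source): the Javadoc above
     * defines getCacheTotalBytes() as the sum of the data, admin, lock and
     * buffer components, so a monitoring loop can log the full breakdown
     * from a single snapshot. The variable "env" is an assumed open
     * Environment.
     *
     *   EnvironmentStats stats = env.getStats(new StatsConfig());
     *   System.out.printf("cache=%d data=%d admin=%d lock=%d buffer=%d%n",
     *       stats.getCacheTotalBytes(), stats.getDataBytes(),
     *       stats.getAdminBytes(), stats.getLockBytes(),
     *       stats.getBufferBytes());
     */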

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DATA_BYTES_NAME}</p>
     *
     * <p>If this environment uses the shared cache, this method returns
     * only the amount used by this environment.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getDataBytes() {
        return cacheStats.getLong(MB_DATA_BYTES);
    }

    /** @deprecated as of JE 18.1, always returns zero. */
    public long getDataAdminBytes() { return 0; }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DOS_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_DOS_BYTES_NAME}</p>
     *
     * <p>If this environment uses the shared cache, this method returns
     * only the amount used by this environment.</p>
     *
     * @see Cache Statistics: Unexpected Sizes
     */
    public long getDOSBytes() {
        return cacheStats.getLong(MB_DOS_BYTES);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_ADMIN_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_ADMIN_BYTES_NAME}</p>
     *
     * <p>If this environment uses the shared cache, this method returns
     * only the amount used by this environment.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getAdminBytes() {
        return cacheStats.getLong(MB_ADMIN_BYTES);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_LOCK_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.EvictorStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#MB_LOCK_BYTES_NAME}</p>
     *
     * <p>If this environment uses the shared cache, this method returns
     * only the amount used by this environment.</p>
     *
     * @see Cache Statistics: Unexpected Sizes
     */
    public long getLockBytes() {
        return cacheStats.getLong(MB_LOCK_BYTES);
    }

    /**
     * @deprecated Please use {@link #getDataBytes} to get the amount of
     * cache used for data and use {@link #getAdminBytes}, {@link
     * #getLockBytes} and {@link #getBufferBytes} to get other components of
     * the total cache usage ({@link #getCacheTotalBytes}).
     */
    public long getCacheDataBytes() {
        return getCacheTotalBytes() - getBufferBytes();
    }

    /* OffHeapCache stats. */
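
    /*
     * Editorial sketch (not part of the original source): per the
     * deprecation note above, getCacheDataBytes() is simply the total minus
     * the buffer component, so callers can compute the same value from the
     * supported getters. "stats" is an assumed EnvironmentStats instance.
     *
     *   long legacyDataBytes =
     *       stats.getCacheTotalBytes() - stats.getBufferBytes();
     */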

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_FAILURE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_FAILURE_NAME}</p>
     *
     * <p>Currently, with the default off-heap allocator, an allocation
     * failure occurs only when OutOfMemoryError is thrown by {@code
     * Unsafe.allocateMemory}. This might be considered a fatal error, since
     * it means that no memory is available on the machine or VM. In
     * practice, we have not seen this occur because Linux will
     * automatically kill processes that are rapidly allocating memory when
     * available memory is very low.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapAllocFailures() {
        return cacheStats.getLong(OffHeapStatDefinition.ALLOC_FAILURE);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_OVERFLOW_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#ALLOC_OVERFLOW_NAME}</p>
     *
     * <p>Currently, with the default off-heap allocator, this never happens
     * because the allocator will perform the allocation as long as any
     * memory is available. Even so, the off-heap evictor normally prevents
     * overflowing of the off-heap cache by freeing memory before it is
     * needed.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapAllocOverflows() {
        return cacheStats.getLong(OffHeapStatDefinition.ALLOC_OVERFLOW);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#THREAD_UNAVAILABLE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#THREAD_UNAVAILABLE_NAME}</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getOffHeapThreadUnavailable() {
        return cacheStats.getLong(OffHeapStatDefinition.THREAD_UNAVAILABLE);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_TARGETED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_TARGETED_NAME}</p>
     *
     * <p>Nodes are selected as targets by the evictor based on LRU, always
     * selecting from the cold end of the LRU list. First, non-dirty nodes
     * and nodes referring to off-heap LNs are selected based on LRU. When
     * there are no more such nodes, dirty nodes with no off-heap LNs are
     * selected, based on LRU.</p>
     *
     * <p>An eviction target may actually be evicted, or it may be skipped
     * or put back on the LRU, potentially after its child LNs are stripped
     * or it is mutated to a BIN-delta.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getOffHeapNodesTargeted() {
        return offHeapStats.getLong(OffHeapStatDefinition.NODES_TARGETED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CRITICAL_NODES_TARGETED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CRITICAL_NODES_TARGETED_NAME}</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Critical Eviction
     */
    public long getOffHeapCriticalNodesTargeted() {
        return cacheStats.getLong(
            OffHeapStatDefinition.CRITICAL_NODES_TARGETED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_EVICTED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_EVICTED_NAME}</p>
     *
     * <p>An evicted BIN is completely removed from the off-heap cache and
     * LRU list. If it is dirty, it must be logged. A BIN is evicted only if
     * it has no off-heap child LNs and it cannot be mutated to a
     * BIN-delta.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Eviction
     */
    public long getOffHeapNodesEvicted() {
        return cacheStats.getLong(OffHeapStatDefinition.NODES_EVICTED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#DIRTY_NODES_EVICTED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#DIRTY_NODES_EVICTED_NAME}</p>
     *
     * <p>This stat value is a subset of {@link #getOffHeapNodesEvicted()}.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Eviction
     */
    public long getOffHeapDirtyNodesEvicted() {
        return cacheStats.getLong(OffHeapStatDefinition.DIRTY_NODES_EVICTED);
    }
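
    /*
     * Editorial sketch (not part of the original source): since the Javadoc
     * above states that dirty evicted nodes are a subset of all evicted
     * nodes, the fraction of off-heap evictions that required logging can
     * be derived directly. "stats" is an assumed EnvironmentStats instance.
     *
     *   long evicted = stats.getOffHeapNodesEvicted();
     *   double dirtyFraction = (evicted == 0) ? 0.0 :
     *       (double) stats.getOffHeapDirtyNodesEvicted() / evicted;
     */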

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_STRIPPED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_STRIPPED_NAME}</p>
     *
     * <p>If space can be reclaimed by stripping a BIN, this prevents
     * mutating the BIN to a BIN-delta or evicting the BIN entirely. See
     * {@link CacheMode} for a description of eviction.</p>
     *
     * <p>When a BIN is stripped, all off-heap LNs that the BIN refers to
     * are evicted, and space is reclaimed for expired records. The {@link
     * #getOffHeapLNsEvicted()} stat is incremented accordingly.</p>
     *
     * <p>A stripped BIN may be a BIN in the main cache that is stripped of
     * off-heap LNs, or a BIN that is itself off-heap and refers to off-heap
     * LNs. When a main cache BIN is stripped, it is removed from the
     * off-heap LRU. When an off-heap BIN is stripped, it is either modified
     * in place to remove the LN references (when only a small number of LNs
     * are referenced and the wasted space is small), or copied to a new,
     * smaller off-heap block with no LN references.</p>
     *
     * <p>After an off-heap BIN is stripped, it is moved to the hot end of
     * the LRU list. Off-heap BINs are mutated to BIN-deltas or evicted
     * completely only when they do not refer to any off-heap LNs. This
     * gives BINs precedence over LNs in the cache.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapNodesStripped() {
        return cacheStats.getLong(OffHeapStatDefinition.NODES_STRIPPED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_MUTATED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_MUTATED_NAME}</p>
     *
     * <p>Mutation to a BIN-delta is performed for full BINs that do not
     * refer to any off-heap LNs and can be represented as BIN-deltas in
     * cache and on disk (see {@link EnvironmentConfig#TREE_BIN_DELTA}).
     * When a BIN is mutated, it is copied to a new, smaller off-heap block.
     * After mutating an off-heap BIN, it is moved to the hot end of the LRU
     * list.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Eviction
     */
    public long getOffHeapNodesMutated() {
        return cacheStats.getLong(OffHeapStatDefinition.NODES_MUTATED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_SKIPPED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#NODES_SKIPPED_NAME}</p>
     *
     * <p>For example, a node will be skipped if it has been moved to the
     * hot end of the LRU list by another thread, or, more rarely, has
     * already been processed by another evictor thread. This can occur
     * because there is a short period of time during which a targeted node
     * has been removed from the LRU by the evictor thread but not yet
     * latched.</p>
     *
     * <p>The number of skipped nodes is normally very small compared to
     * the number of targeted nodes.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapNodesSkipped() {
        return cacheStats.getLong(OffHeapStatDefinition.NODES_SKIPPED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_EVICTED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_EVICTED_NAME}</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Eviction
     */
    public long getOffHeapLNsEvicted() {
        return offHeapStats.getLong(OffHeapStatDefinition.LNS_EVICTED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_LOADED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_LOADED_NAME}</p>
     *
     * <p>LNs are loaded when requested by CRUD operations or other
     * internal btree operations.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapLNsLoaded() {
        return offHeapStats.getLong(OffHeapStatDefinition.LNS_LOADED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_STORED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LNS_STORED_NAME}</p>
     *
     * <p>LNs are stored off-heap when they are evicted from the main
     * cache. Note that when {@link CacheMode#EVICT_LN} is used, the LN
     * resides in the main cache for a very short period, since it is
     * evicted after the CRUD operation is complete.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapLNsStored() {
        return offHeapStats.getLong(OffHeapStatDefinition.LNS_STORED);
    }
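
    /*
     * Editorial sketch (not part of the original source): comparing LNs
     * stored off-heap with LNs loaded back gives a rough sense of how often
     * off-heap LNs are re-used before being evicted. This is an
     * interpretation, not a documented metric. "stats" is an assumed
     * EnvironmentStats instance.
     *
     *   long stored = stats.getOffHeapLNsStored();
     *   long loaded = stats.getOffHeapLNsLoaded();
     *   System.out.printf("off-heap LNs: stored=%d loaded=%d%n",
     *       stored, loaded);
     */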

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_LOADED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_LOADED_NAME}</p>
     *
     * <p>BINs are loaded when needed by CRUD operations or other internal
     * btree operations.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapBINsLoaded() {
        return offHeapStats.getLong(OffHeapStatDefinition.BINS_LOADED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_STORED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#BINS_STORED_NAME}</p>
     *
     * <p>BINs are stored off-heap when they are evicted from the main
     * cache. Note that when {@link CacheMode#EVICT_BIN} is used, the BIN
     * resides in the main cache for a very short period, since it is
     * evicted after the CRUD operation is complete.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapBINsStored() {
        return offHeapStats.getLong(OffHeapStatDefinition.BINS_STORED);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_LNS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_LNS_NAME}</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Sizing
     */
    public int getOffHeapCachedLNs() {
        return offHeapStats.getInt(OffHeapStatDefinition.CACHED_LNS);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BINS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BINS_NAME}</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Sizing
     */
    public int getOffHeapCachedBINs() {
        return offHeapStats.getInt(OffHeapStatDefinition.CACHED_BINS);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BIN_DELTAS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#CACHED_BIN_DELTAS_NAME}</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Sizing
     */
    public int getOffHeapCachedBINDeltas() {
        return offHeapStats.getInt(OffHeapStatDefinition.CACHED_BIN_DELTAS);
    }

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BYTES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BYTES_NAME}</p>
     *
     * <p>This includes the estimated overhead for off-heap memory blocks,
     * as well as their contents.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * <p>To get the configured maximum off-heap cache size, see {@link
     * EnvironmentMutableConfig#getOffHeapCacheSize()}.</p>
     *
     * @see Cache Statistics: Sizing
     */
    public long getOffHeapTotalBytes() {
        return offHeapStats.getLong(OffHeapStatDefinition.TOTAL_BYTES);
    }
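
    /*
     * Editorial sketch (not part of the original source): comparing the
     * off-heap usage reported above with the configured maximum, as the
     * Javadoc suggests. "env" is an assumed open Environment.
     *
     *   long used = env.getStats(new StatsConfig()).getOffHeapTotalBytes();
     *   long max = env.getMutableConfig().getOffHeapCacheSize();
     *   System.out.printf("off-heap cache: %d of %d bytes used%n", used, max);
     */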

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BLOCKS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#TOTAL_BLOCKS_NAME}</p>
     *
     * <p>There is one block for each off-heap BIN and one for each
     * off-heap LN. So the total number of blocks is the sum of {@link
     * #getOffHeapCachedLNs} and {@link #getOffHeapCachedBINs}.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapTotalBlocks() {
        return offHeapStats.getInt(OffHeapStatDefinition.TOTAL_BLOCKS);
    }
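
    /*
     * Editorial sketch (not part of the original source): the Javadoc above
     * states that there is one block per off-heap BIN and one per off-heap
     * LN, so the block count should track the two cached-node counts taken
     * from the same getStats() snapshot. "stats" is an assumed
     * EnvironmentStats instance.
     *
     *   long blocks = stats.getOffHeapTotalBlocks();
     *   long nodes = stats.getOffHeapCachedLNs() + stats.getOffHeapCachedBINs();
     *   // blocks and nodes should be approximately equal
     */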

    /**
     * {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LRU_SIZE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.evictor.OffHeapStatDefinition#LRU_SIZE_NAME}</p>
     *
     * <p>The off-heap LRU list is stored in the Java heap. Each entry
     * occupies 20 bytes of memory when compressed oops are used, or 24
     * bytes otherwise. This memory is not considered part of the JE main
     * cache, and is not included in main cache statistics.</p>
     *
     * <p>There is one LRU entry for each off-heap BIN, and one for each
     * BIN in main cache that refers to one or more off-heap LNs. The latter
     * approach avoids an LRU entry per off-heap LN, which would use
     * excessive amounts of space in the Java heap. Similarly, when an
     * off-heap BIN refers to off-heap LNs, only one LRU entry (for the BIN)
     * is used.</p>
     *
     * <p>If this environment uses the shared cache, the return value is
     * the total for all environments that are sharing the cache.</p>
     *
     * @see Cache Statistics: Debugging
     */
    public long getOffHeapLRUSize() {
        return offHeapStats.getInt(OffHeapStatDefinition.LRU_SIZE);
    }

    /* Btree operation stats. */
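
    /*
     * Editorial sketch (not part of the original source): the Javadoc above
     * gives 20 bytes per off-heap LRU entry with compressed oops and 24
     * bytes otherwise, so the Java-heap overhead of the off-heap LRU can be
     * estimated from the entry count. Whether compressed oops are enabled
     * is an assumption supplied by the caller here.
     *
     *   boolean compressedOops = true; // assumption; depends on the JVM
     *   long entryBytes = compressedOops ? 20 : 24;
     *   long lruHeapBytes = stats.getOffHeapLRUSize() * entryBytes;
     */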

    /**
     * {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_RELATCHES_REQUIRED_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_RELATCHES_REQUIRED_NAME}</p>
     *
     * @see Btree Operation Statistics
     */
    public long getRelatchesRequired() {
        return btreeOpStats.getLong(BT_OP_RELATCHES_REQUIRED);
    }

    /**
     * {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_ROOT_SPLITS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_ROOT_SPLITS_NAME}</p>
     *
     * @see Btree Operation Statistics
     */
    public long getRootSplits() {
        return btreeOpStats.getLong(BT_OP_ROOT_SPLITS);
    }

    /**
     * {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_GETS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_GETS_NAME}</p>
     *
     * @see Btree Operation Statistics
     */
    public long getNBinDeltaGetOps() {
        return btreeOpStats.getAtomicLong(BT_OP_BIN_DELTA_GETS);
    }

    /**
     * {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_INSERTS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_INSERTS_NAME}</p>
     *
     * @see Btree Operation Statistics
     */
    public long getNBinDeltaInsertOps() {
        return btreeOpStats.getAtomicLong(BT_OP_BIN_DELTA_INSERTS);
    }

    /**
     * {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_UPDATES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_UPDATES_NAME}</p>
     *
     * @see Btree Operation Statistics
     */
    public long getNBinDeltaUpdateOps() {
        return btreeOpStats.getAtomicLong(BT_OP_BIN_DELTA_UPDATES);
    }

    /**
     * {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_DELETES_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.BTreeStatDefinition#BT_OP_BIN_DELTA_DELETES_NAME}</p>
     *
     * @see Btree Operation Statistics
     */
    public long getNBinDeltaDeleteOps() {
        return btreeOpStats.getAtomicLong(BT_OP_BIN_DELTA_DELETES);
    }

    /* Lock stats. */

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_OWNERS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_OWNERS_NAME}</p>
     *
     * <p>Zero is returned when {@link StatsConfig#setFast fast stats} are
     * requested.</p>
     *
     * @see Lock Statistics
     */
    public int getNOwners() {
        return lockStats.getInt(LOCK_OWNERS);
    }

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_TOTAL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_TOTAL_NAME}</p>
     *
     * <p>Zero is returned when {@link StatsConfig#setFast fast stats} are
     * requested.</p>
     *
     * @see Lock Statistics
     */
    public int getNTotalLocks() {
        return lockStats.getInt(LOCK_TOTAL);
    }

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_READ_LOCKS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_READ_LOCKS_NAME}</p>
     *
     * <p>Zero is returned when {@link StatsConfig#setFast fast stats} are
     * requested.</p>
     *
     * @see Lock Statistics
     */
    public int getNReadLocks() {
        return lockStats.getInt(LOCK_READ_LOCKS);
    }

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WRITE_LOCKS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WRITE_LOCKS_NAME}</p>
     *
     * <p>Zero is returned when {@link StatsConfig#setFast fast stats} are
     * requested.</p>
     *
     * @see Lock Statistics
     */
    public int getNWriteLocks() {
        return lockStats.getInt(LOCK_WRITE_LOCKS);
    }

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WAITERS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WAITERS_NAME}</p>
     *
     * <p>Zero is returned when {@link StatsConfig#setFast fast stats} are
     * requested.</p>
     *
     * @see Lock Statistics
     */
    public int getNWaiters() {
        return lockStats.getInt(LOCK_WAITERS);
    }

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_REQUESTS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_REQUESTS_NAME}</p>
     *
     * @see Lock Statistics
     */
    public long getNRequests() {
        return lockStats.getLong(LOCK_REQUESTS);
    }

    /**
     * {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WAITS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.txn.LockStatDefinition#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.txn.LockStatDefinition#LOCK_WAITS_NAME}</p>
     *
     * @see Lock Statistics
     */
    public long getNWaits() {
        return lockStats.getLong(LOCK_WAITS);
    }

    /** @deprecated Always returns zero. */
    public int getNAcquiresNoWaiters() { return 0; }

    /** @deprecated Always returns zero. */
    public int getNAcquiresSelfOwned() { return 0; }

    /** @deprecated Always returns zero. */
    public int getNAcquiresWithContention() { return 0; }

    /** @deprecated Always returns zero. */
    public int getNAcquiresNoWaitSuccessful() { return 0; }

    /** @deprecated Always returns zero. */
    public int getNAcquiresNoWaitUnSuccessful() { return 0; }

    /** @deprecated Always returns zero. */
    public int getNReleases() { return 0; }

    /* Throughput stats. */
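
    /*
     * Editorial sketch (not part of the original source): several lock
     * getters above note that they return zero when fast stats are
     * requested, so contention monitoring has two modes. The wait ratio
     * below is still meaningful under fast stats. "env" is an assumed open
     * Environment.
     *
     *   StatsConfig fast = new StatsConfig();
     *   fast.setFast(true);
     *   EnvironmentStats s = env.getStats(fast);
     *   // getNOwners()/getNTotalLocks() are zero here; the counters below
     *   // are still maintained:
     *   double waitRatio = (s.getNRequests() == 0) ? 0.0 :
     *       (double) s.getNWaits() / s.getNRequests();
     */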

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_SEARCH_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_SEARCH_NAME}</p>
     *
     * <p>This operation corresponds to one of the following API calls:</p>
     * <ul>
     * <li>A successful {@link Cursor#get(DatabaseEntry, DatabaseEntry,
     * Get, ReadOptions) Cursor.get} or {@link Database#get(Transaction,
     * DatabaseEntry, DatabaseEntry, Get, ReadOptions) Database.get} call
     * with {@link Get#SEARCH}, {@link Get#SEARCH_GTE}, {@link
     * Get#SEARCH_BOTH}, or {@link Get#SEARCH_BOTH_GTE}.</li>
     * <li>A successful {@link SecondaryCursor#get(DatabaseEntry,
     * DatabaseEntry, DatabaseEntry, Get, ReadOptions) SecondaryCursor.get}
     * or {@link SecondaryDatabase#get(Transaction, DatabaseEntry,
     * DatabaseEntry, DatabaseEntry, Get, ReadOptions)
     * SecondaryDatabase.get} call when the primary data is requested (via
     * the {@code data} param). This call internally performs a key search
     * operation in the primary DB in order to return the data.</li>
     * </ul>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriSearchOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_SEARCH);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_SEARCH_FAIL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_SEARCH_FAIL_NAME}</p>
     *
     * <p>This operation corresponds to a call to {@link
     * Cursor#get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)
     * Cursor.get} or {@link Database#get(Transaction, DatabaseEntry,
     * DatabaseEntry, Get, ReadOptions) Database.get} with {@link
     * Get#SEARCH}, {@link Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or
     * {@link Get#SEARCH_BOTH_GTE}, when the specified key is not found in
     * the DB.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriSearchFailOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_SEARCH_FAIL);
    }
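
    /*
     * Editorial sketch (not part of the original source): together,
     * getPriSearchOps() and getPriSearchFailOps() count successful and
     * failed key searches, so a primary-key miss rate can be derived.
     * "stats" is an assumed EnvironmentStats instance.
     *
     *   long hits = stats.getPriSearchOps();
     *   long misses = stats.getPriSearchFailOps();
     *   double missRate = (hits + misses == 0) ? 0.0 :
     *       (double) misses / (hits + misses);
     */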

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_SEARCH_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_SEARCH_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * SecondaryCursor#get(DatabaseEntry, DatabaseEntry, DatabaseEntry,
     * Get, ReadOptions) SecondaryCursor.get} or {@link
     * SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
     * DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} with {@link
     * Get#SEARCH}, {@link Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or
     * {@link Get#SEARCH_BOTH_GTE}.</p>
     *
     * <p>Note: Operations are currently counted as secondary DB (rather
     * than primary DB) operations only if the DB has been opened by the
     * application as a secondary DB. In particular, the stats may be
     * confusing on an HA replica node if a secondary DB has not been
     * opened by the application on the replica.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getSecSearchOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_SEC_SEARCH);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_SEARCH_FAIL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_SEARCH_FAIL_NAME}</p>
     *
     * <p>This operation corresponds to a call to {@link
     * SecondaryCursor#get(DatabaseEntry, DatabaseEntry, DatabaseEntry,
     * Get, ReadOptions) SecondaryCursor.get} or {@link
     * SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
     * DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} with {@link
     * Get#SEARCH}, {@link Get#SEARCH_GTE}, {@link Get#SEARCH_BOTH}, or
     * {@link Get#SEARCH_BOTH_GTE}, when the specified key is not found in
     * the DB.</p>
     *
     * <p>Note: Operations are currently counted as secondary DB (rather
     * than primary DB) operations only if the DB has been opened by the
     * application as a secondary DB. In particular, the stats may be
     * confusing on an HA replica node if a secondary DB has not been
     * opened by the application on the replica.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getSecSearchFailOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_SEC_SEARCH_FAIL);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_POSITION_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_POSITION_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * Cursor#get(DatabaseEntry, DatabaseEntry, Get, ReadOptions)
     * Cursor.get} or {@link Database#get(Transaction, DatabaseEntry,
     * DatabaseEntry, Get, ReadOptions) Database.get} with {@link
     * Get#FIRST}, {@link Get#LAST}, {@link Get#NEXT}, {@link Get#NEXT_DUP},
     * {@link Get#NEXT_NO_DUP}, {@link Get#PREV}, {@link Get#PREV_DUP} or
     * {@link Get#PREV_NO_DUP}.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriPositionOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_POSITION);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_POSITION_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_POSITION_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * SecondaryCursor#get(DatabaseEntry, DatabaseEntry, DatabaseEntry,
     * Get, ReadOptions) SecondaryCursor.get} or {@link
     * SecondaryDatabase#get(Transaction, DatabaseEntry, DatabaseEntry,
     * DatabaseEntry, Get, ReadOptions) SecondaryDatabase.get} with {@link
     * Get#FIRST}, {@link Get#LAST}, {@link Get#NEXT}, {@link Get#NEXT_DUP},
     * {@link Get#NEXT_NO_DUP}, {@link Get#PREV}, {@link Get#PREV_DUP} or
     * {@link Get#PREV_NO_DUP}.</p>
     *
     * <p>Note: Operations are currently counted as secondary DB (rather
     * than primary DB) operations only if the DB has been opened by the
     * application as a secondary DB. In particular, the stats may be
     * confusing on an HA replica node if a secondary DB has not been
     * opened by the application on the replica.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getSecPositionOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_SEC_POSITION);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_INSERT_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_INSERT_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)
     * Cursor.put} or {@link Database#put(Transaction, DatabaseEntry,
     * DatabaseEntry, Put, WriteOptions) Database.put} in one of the
     * following cases:</p>
     * <ul>
     * <li>When {@link Put#NO_OVERWRITE} or {@link Put#NO_DUP_DATA} is
     * specified.</li>
     * <li>When {@link Put#OVERWRITE} is specified and the key was inserted
     * because it previously did not exist in the DB.</li>
     * </ul>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriInsertOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_INSERT);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_INSERT_FAIL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_INSERT_FAIL_NAME}</p>
     *
     * <p>This operation corresponds to a call to {@link
     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)
     * Cursor.put} or {@link Database#put(Transaction, DatabaseEntry,
     * DatabaseEntry, Put, WriteOptions) Database.put} with {@link
     * Put#NO_OVERWRITE} or {@link Put#NO_DUP_DATA}, when the key could not
     * be inserted because it previously existed in the DB.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriInsertFailOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_INSERT_FAIL);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_INSERT_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_INSERT_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)
     * Cursor.put} or {@link Database#put(Transaction, DatabaseEntry,
     * DatabaseEntry, Put, WriteOptions) Database.put}, for a primary DB
     * with an associated secondary DB. A secondary record is inserted when
     * inserting a primary record with a non-null secondary key, or when
     * updating a primary record and the secondary key is changed to a
     * non-null value that is different than the previously existing
     * value.</p>
     *
     * <p>Note: Operations are currently counted as secondary DB (rather
     * than primary DB) operations only if the DB has been opened by the
     * application as a secondary DB. In particular, the stats may be
     * confusing on an HA replica node if a secondary DB has not been
     * opened by the application on the replica.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getSecInsertOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_SEC_INSERT);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_UPDATE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_UPDATE_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)
     * Cursor.put} or {@link Database#put(Transaction, DatabaseEntry,
     * DatabaseEntry, Put, WriteOptions) Database.put} in one of the
     * following cases:</p>
     * <ul>
     * <li>When {@link Put#OVERWRITE} is specified and the key previously
     * existed in the DB.</li>
     * <li>When calling {@code Cursor.put} with {@link Put#CURRENT}.</li>
     * </ul>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriUpdateOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_UPDATE);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_UPDATE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_UPDATE_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * Cursor#put(DatabaseEntry, DatabaseEntry, Put, WriteOptions)
     * Cursor.put} or {@link Database#put(Transaction, DatabaseEntry,
     * DatabaseEntry, Put, WriteOptions) Database.put}, when a primary
     * record is updated and its TTL is changed. The associated secondary
     * records must also be updated to reflect the change in the TTL.</p>
     *
     * <p>Note: Operations are currently counted as secondary DB (rather
     * than primary DB) operations only if the DB has been opened by the
     * application as a secondary DB. In particular, the stats may be
     * confusing on an HA replica node if a secondary DB has not been
     * opened by the application on the replica.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getSecUpdateOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_SEC_UPDATE);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_DELETE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_DELETE_NAME}</p>
     *
     * <p>This operation corresponds to a successful call to {@link
     * Cursor#delete() Cursor.delete}, {@link Database#delete(Transaction,
     * DatabaseEntry, WriteOptions) Database.delete}, {@link
     * SecondaryCursor#delete() SecondaryCursor.delete} or {@link
     * SecondaryDatabase#delete(Transaction, DatabaseEntry, WriteOptions)
     * SecondaryDatabase.delete}.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriDeleteOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_DELETE);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_DELETE_FAIL_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_PRI_DELETE_FAIL_NAME}</p>
     *
     * <p>This operation corresponds to a call to {@link
     * Database#delete(Transaction, DatabaseEntry, WriteOptions)
     * Database.delete} or {@link SecondaryDatabase#delete(Transaction,
     * DatabaseEntry, WriteOptions) SecondaryDatabase.delete}, when the key
     * could not be deleted because it did not previously exist in the
     * DB.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getPriDeleteFailOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_PRI_DELETE_FAIL);
    }

    /**
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_DELETE_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#THROUGHPUT_SEC_DELETE_NAME}</p>
     *
     * <p>This operation corresponds to one of the following API calls:</p>
     * <ul>
     * <li>A successful call to {@link Cursor#delete() Cursor.delete} or
     * {@link Database#delete(Transaction, DatabaseEntry, WriteOptions)
     * Database.delete}, that deletes a primary record containing a
     * non-null secondary key.</li>
     * <li>A successful call to {@link SecondaryCursor#delete()
     * SecondaryCursor.delete} or {@link
     * SecondaryDatabase#delete(Transaction, DatabaseEntry, WriteOptions)
     * SecondaryDatabase.delete}.</li>
     * <li>A successful call to {@link Cursor#put(DatabaseEntry,
     * DatabaseEntry, Put, WriteOptions) Cursor.put} or {@link
     * Database#put(Transaction, DatabaseEntry, DatabaseEntry, Put,
     * WriteOptions) Database.put} that updates a primary record and
     * changes its previously non-null secondary key to null.</li>
     * </ul>
     *
     * <p>Note: Operations are currently counted as secondary DB (rather
     * than primary DB) operations only if the DB has been opened by the
     * application as a secondary DB. In particular, the stats may be
     * confusing on an HA replica node if a secondary DB has not been
     * opened by the application on the replica.</p>
     *
     * @see Operation Throughput Statistics
     */
    public long getSecDeleteOps() {
        return throughputStats.getAtomicLong(THROUGHPUT_SEC_DELETE);
    }

    /* TaskCoordinator stats. */
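
    /*
     * Editorial sketch (not part of the original source): the throughput
     * counters above are cumulative, so per-interval rates are usually
     * obtained by clearing the stats on each collection. "env" is an
     * assumed open Environment, and the 60-second interval is an arbitrary
     * choice.
     *
     *   StatsConfig clearing = new StatsConfig();
     *   clearing.setClear(true);
     *   EnvironmentStats s = env.getStats(clearing); // counts since last call
     *   double insertsPerSec = s.getPriInsertOps() / 60.0;
     */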

    /**
     * @hidden
     * {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#REAL_PERMITS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#REAL_PERMITS_NAME}</p>
     *
     * <p>This counter is effectively incremented each time a call to
     * {@link TaskCoordinator#acquirePermit} returns a real permit and
     * decremented when {@link TaskCoordinator#releasePermit} releases a
     * real permit.</p>
     *
     * @see Task Coordinator Statistics
     */
    public long getRealPermits() {
        return taskCoordinatorStats.getInt(
            TaskCoordinator.StatDefs.REAL_PERMITS);
    }

    /**
     * @hidden
     * {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#DEFICIT_PERMITS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#DEFICIT_PERMITS_NAME}</p>
     *
     * <p>This counter is effectively incremented each time a call to
     * {@link TaskCoordinator#acquirePermit} returns a deficit permit and
     * decremented when {@link TaskCoordinator#releasePermit} releases a
     * deficit permit.</p>
     *
     * <p>This number should typically be zero. If it is consistently
     * positive, it indicates that the application load is high and
     * housekeeping tasks are being throttled.</p>
     *
     * @see Task Coordinator Statistics
     */
    public long getDeficitPermits() {
        return taskCoordinatorStats.getInt(
            TaskCoordinator.StatDefs.DEFICIT_PERMITS);
    }

    /**
     * @hidden
     * {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#APPLICATION_PERMITS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.utilint.TaskCoordinator.StatDefs#APPLICATION_PERMITS_NAME}</p>
     *
     * <p>This number changes as a result of calls to {@link
     * TaskCoordinator#setAppPermitPercent}. Increasing this percentage
     * results in more permits being reserved by the application, while
     * reducing the percentage decreases this number.</p>
     *
     * @see Task Coordinator Statistics
     */
    public long getApplicationPermits() {
        return taskCoordinatorStats.getInt(
            TaskCoordinator.StatDefs.APPLICATION_PERMITS);
    }

    /**
     * @hidden For internal use: automatic backups
     *
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#BACKUP_COPY_FILES_COUNT_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#BACKUP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#BACKUP_COPY_FILES_COUNT_NAME}</p>
     *
     * @see Automatic Backup Statistics
     */
    public int getBackupCopyFilesCount() {
        return backupStats.getInt(BACKUP_COPY_FILES_COUNT);
    }

    /**
     * @hidden For internal use: automatic backups
     *
     * {@value com.sleepycat.je.dbi.DbiStatDefinition#BACKUP_COPY_FILES_MS_DESC}
     *
     * <p>Group: {@value com.sleepycat.je.dbi.DbiStatDefinition#BACKUP_GROUP_NAME}
     * <br>Name: {@value com.sleepycat.je.dbi.DbiStatDefinition#BACKUP_COPY_FILES_MS_NAME}</p>
     *
     * @see Automatic Backup Statistics
     */
    public long getBackupCopyFilesMs() {
        return backupStats.getLong(BACKUP_COPY_FILES_MS);
    }

    /**
     * Returns a String representation of the stats in the form of
     * &lt;stat&gt;=&lt;value&gt;.
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        for (StatGroup group : getStatGroups()) {
            sb.append(group.toString());
        }
        return sb.toString();
    }

    /**
     * Returns a String representation of the stats which includes stat
     * descriptions in addition to &lt;stat&gt;=&lt;value&gt;.
     */
    public String toStringVerbose() {
        StringBuilder sb = new StringBuilder();
        for (StatGroup group : getStatGroups()) {
            sb.append(group.toStringVerbose());
        }
        return sb.toString();
    }

    /**
     * @hidden
     * Internal use only.
     * JConsole plugin support: Get tips for stats.
     */
    public Map<String, String> getTips() {
        Map<String, String> tipsMap = new HashMap<>();
        for (StatGroup group : getStatGroups()) {
            group.addToTipMap(tipsMap);
        }
        return tipsMap;
    }
}
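
/*
 * Editorial sketch (not part of the original source): toString() and
 * toStringVerbose() above make it easy to dump all statistics, e.g. for
 * periodic logging. "env" is an assumed open Environment.
 *
 *   EnvironmentStats stats = env.getStats(new StatsConfig());
 *   System.out.println(stats.toStringVerbose()); // includes descriptions
 */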



