Please wait. This can take a few minutes...
Many resources are needed to download a project. Please understand that we have to compensate for our server costs. Thank you in advance.
Project price: only $1.
You can buy this project and download/modify it as often as you want.
com.pulumi.splunk.IndexesArgs Maven / Gradle / Ivy
Go to download
A Pulumi package for creating and managing splunk cloud resources.
// *** WARNING: this file was generated by pulumi-java-gen. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package com.pulumi.splunk;
import com.pulumi.core.Output;
import com.pulumi.core.annotations.Import;
import com.pulumi.splunk.inputs.IndexesAclArgs;
import java.lang.Boolean;
import java.lang.Integer;
import java.lang.String;
import java.util.Objects;
import java.util.Optional;
import javax.annotation.Nullable;
public final class IndexesArgs extends com.pulumi.resources.ResourceArgs {
public static final IndexesArgs Empty = new IndexesArgs();
/**
* The app/user context that is the namespace for the resource
*
*/
@Import(name="acl")
private @Nullable Output acl;
/**
* @return The app/user context that is the namespace for the resource
*
*/
public Optional> acl() {
return Optional.ofNullable(this.acl);
}
/**
* Controls how many events make up a block for block signatures. If this is set to 0, block signing is disabled for this index. <br>A recommended value is 100.
*
*/
@Import(name="blockSignSize")
private @Nullable Output blockSignSize;
/**
* @return Controls how many events make up a block for block signatures. If this is set to 0, block signing is disabled for this index. <br>A recommended value is 100.
*
*/
public Optional> blockSignSize() {
return Optional.ofNullable(this.blockSignSize);
}
/**
* Suggestion for the bucket rebuild process for the size of the time-series (tsidx) file to make.
* <be>Caution: This is an advanced parameter. Inappropriate use of this parameter causes splunkd to not start if rebuild is required. Do not set this parameter unless instructed by Splunk Support.
* Default value, auto, varies by the amount of physical RAM on the host<br>
* less than 2GB RAM = 67108864 (64MB) tsidx
* 2GB to 8GB RAM = 134217728 (128MB) tsidx
* more than 8GB RAM = 268435456 (256MB) tsidx<br>
* Values other than "auto" must be 16MB-1GB. Highest legal value (of the numerical part) is 4294967295 You can specify the value using a size suffix: "16777216" or "16MB" are equivalent.
*
*/
@Import(name="bucketRebuildMemoryHint")
private @Nullable Output bucketRebuildMemoryHint;
/**
* @return Suggestion for the bucket rebuild process for the size of the time-series (tsidx) file to make.
* <be>Caution: This is an advanced parameter. Inappropriate use of this parameter causes splunkd to not start if rebuild is required. Do not set this parameter unless instructed by Splunk Support.
* Default value, auto, varies by the amount of physical RAM on the host<br>
* less than 2GB RAM = 67108864 (64MB) tsidx
* 2GB to 8GB RAM = 134217728 (128MB) tsidx
* more than 8GB RAM = 268435456 (256MB) tsidx<br>
* Values other than "auto" must be 16MB-1GB. Highest legal value (of the numerical part) is 4294967295 You can specify the value using a size suffix: "16777216" or "16MB" are equivalent.
*
*/
public Optional> bucketRebuildMemoryHint() {
return Optional.ofNullable(this.bucketRebuildMemoryHint);
}
/**
* An absolute path that contains the colddbs for the index. The path must be readable and writable. Cold databases are opened as needed when searching.
*
*/
@Import(name="coldPath")
private @Nullable Output coldPath;
/**
* @return An absolute path that contains the colddbs for the index. The path must be readable and writable. Cold databases are opened as needed when searching.
*
*/
public Optional> coldPath() {
return Optional.ofNullable(this.coldPath);
}
/**
* Destination path for the frozen archive. Use as an alternative to a coldToFrozenScript. Splunk software automatically puts frozen buckets in this directory.
* <br>
* Bucket freezing policy is as follows:<br>
* New style buckets (4.2 and on): removes all files but the rawdata<br>
* To thaw, run splunk rebuild <bucket dir> on the bucket, then move to the thawed directory<br>
* Old style buckets (Pre-4.2): gzip all the .data and .tsidx files<br>
* To thaw, gunzip the zipped files and move the bucket into the thawed directory<br>
* If both coldToFrozenDir and coldToFrozenScript are specified, coldToFrozenDir takes precedence
*
*/
@Import(name="coldToFrozenDir")
private @Nullable Output coldToFrozenDir;
/**
* @return Destination path for the frozen archive. Use as an alternative to a coldToFrozenScript. Splunk software automatically puts frozen buckets in this directory.
* <br>
* Bucket freezing policy is as follows:<br>
* New style buckets (4.2 and on): removes all files but the rawdata<br>
* To thaw, run splunk rebuild <bucket dir> on the bucket, then move to the thawed directory<br>
* Old style buckets (Pre-4.2): gzip all the .data and .tsidx files<br>
* To thaw, gunzip the zipped files and move the bucket into the thawed directory<br>
* If both coldToFrozenDir and coldToFrozenScript are specified, coldToFrozenDir takes precedence
*
*/
public Optional> coldToFrozenDir() {
return Optional.ofNullable(this.coldToFrozenDir);
}
/**
* Path to the archiving script.
* <br>If your script requires a program to run it (for example, python), specify the program followed by the path. The script must be in $SPLUNK_HOME/bin or one of its subdirectories.
* <br>Splunk software ships with an example archiving script in $SPLUNK_HOME/bin called coldToFrozenExample.py. DO NOT use this example script directly. It uses a default path, and if modified in place any changes are overwritten on upgrade.
* <br>It is best to copy the example script to a new file in bin and modify it for your system. Most importantly, change the default archive path to an existing directory that fits your needs.
*
*/
@Import(name="coldToFrozenScript")
private @Nullable Output coldToFrozenScript;
/**
* @return Path to the archiving script.
* <br>If your script requires a program to run it (for example, python), specify the program followed by the path. The script must be in $SPLUNK_HOME/bin or one of its subdirectories.
* <br>Splunk software ships with an example archiving script in $SPLUNK_HOME/bin called coldToFrozenExample.py. DO NOT use this example script directly. It uses a default path, and if modified in place any changes are overwritten on upgrade.
* <br>It is best to copy the example script to a new file in bin and modify it for your system. Most importantly, change the default archive path to an existing directory that fits your needs.
*
*/
public Optional> coldToFrozenScript() {
return Optional.ofNullable(this.coldToFrozenScript);
}
/**
* This parameter is ignored. The splunkd process always compresses raw data.
*
*/
@Import(name="compressRawdata")
private @Nullable Output compressRawdata;
/**
* @return This parameter is ignored. The splunkd process always compresses raw data.
*
*/
public Optional> compressRawdata() {
return Optional.ofNullable(this.compressRawdata);
}
/**
* Valid values: (event | metric). Specifies the type of index.
*
*/
@Import(name="datatype")
private @Nullable Output datatype;
/**
* @return Valid values: (event | metric). Specifies the type of index.
*
*/
public Optional> datatype() {
return Optional.ofNullable(this.datatype);
}
/**
* Enables asynchronous "online fsck" bucket repair, which runs concurrently with Splunk software.
* When enabled, you do not have to wait until buckets are repaired to start the Splunk platform. However, you might observe a slight performance degratation.
*
*/
@Import(name="enableOnlineBucketRepair")
private @Nullable Output enableOnlineBucketRepair;
/**
* @return Enables asynchronous "online fsck" bucket repair, which runs concurrently with Splunk software.
* When enabled, you do not have to wait until buckets are repaired to start the Splunk platform. However, you might observe a slight performance degratation.
*
*/
public Optional> enableOnlineBucketRepair() {
return Optional.ofNullable(this.enableOnlineBucketRepair);
}
/**
* Number of seconds after which indexed data rolls to frozen.
* Defaults to 188697600 (6 years).Freezing data means it is removed from the index. If you need to archive your data, refer to coldToFrozenDir and coldToFrozenScript parameter documentation.
*
*/
@Import(name="frozenTimePeriodInSecs")
private @Nullable Output frozenTimePeriodInSecs;
/**
* @return Number of seconds after which indexed data rolls to frozen.
* Defaults to 188697600 (6 years).Freezing data means it is removed from the index. If you need to archive your data, refer to coldToFrozenDir and coldToFrozenScript parameter documentation.
*
*/
public Optional> frozenTimePeriodInSecs() {
return Optional.ofNullable(this.frozenTimePeriodInSecs);
}
/**
* An absolute path that contains the hot and warm buckets for the index.
* Required. Splunk software does not start if an index lacks a valid homePath.
* <br>Caution: The path must be readable and writable.
*
*/
@Import(name="homePath")
private @Nullable Output homePath;
/**
* @return An absolute path that contains the hot and warm buckets for the index.
* Required. Splunk software does not start if an index lacks a valid homePath.
* <br>Caution: The path must be readable and writable.
*
*/
public Optional> homePath() {
return Optional.ofNullable(this.homePath);
}
/**
* Valid values are: Integer[m|s|h|d].
* <br>If a warm or cold bucket is older than the specified age, do not create or rebuild its bloomfilter. Specify 0 to never rebuild bloomfilters.
*
*/
@Import(name="maxBloomBackfillBucketAge")
private @Nullable Output maxBloomBackfillBucketAge;
/**
* @return Valid values are: Integer[m|s|h|d].
* <br>If a warm or cold bucket is older than the specified age, do not create or rebuild its bloomfilter. Specify 0 to never rebuild bloomfilters.
*
*/
public Optional> maxBloomBackfillBucketAge() {
return Optional.ofNullable(this.maxBloomBackfillBucketAge);
}
/**
* The number of concurrent optimize processes that can run against a hot bucket.
* This number should be increased if instructed by Splunk Support. Typically the default value should suffice.
*
*/
@Import(name="maxConcurrentOptimizes")
private @Nullable Output maxConcurrentOptimizes;
/**
* @return The number of concurrent optimize processes that can run against a hot bucket.
* This number should be increased if instructed by Splunk Support. Typically the default value should suffice.
*
*/
public Optional> maxConcurrentOptimizes() {
return Optional.ofNullable(this.maxConcurrentOptimizes);
}
/**
* The maximum size in MB for a hot DB to reach before a roll to warm is triggered. Specifying "auto" or "auto_high_volume" causes Splunk software to autotune this parameter (recommended).
* Use "auto_high_volume" for high volume indexes (such as the main index); otherwise, use "auto". A "high volume index" would typically be considered one that gets over 10GB of data per day.
*
*/
@Import(name="maxDataSize")
private @Nullable Output maxDataSize;
/**
* @return The maximum size in MB for a hot DB to reach before a roll to warm is triggered. Specifying "auto" or "auto_high_volume" causes Splunk software to autotune this parameter (recommended).
* Use "auto_high_volume" for high volume indexes (such as the main index); otherwise, use "auto". A "high volume index" would typically be considered one that gets over 10GB of data per day.
*
*/
public Optional> maxDataSize() {
return Optional.ofNullable(this.maxDataSize);
}
/**
* Maximum hot buckets that can exist per index. Defaults to 3.
* <br>When maxHotBuckets is exceeded, Splunk software rolls the least recently used (LRU) hot bucket to warm. Both normal hot buckets and quarantined hot buckets count towards this total. This setting operates independently of maxHotIdleSecs, which can also cause hot buckets to roll.
*
*/
@Import(name="maxHotBuckets")
private @Nullable Output maxHotBuckets;
/**
* @return Maximum hot buckets that can exist per index. Defaults to 3.
* <br>When maxHotBuckets is exceeded, Splunk software rolls the least recently used (LRU) hot bucket to warm. Both normal hot buckets and quarantined hot buckets count towards this total. This setting operates independently of maxHotIdleSecs, which can also cause hot buckets to roll.
*
*/
public Optional> maxHotBuckets() {
return Optional.ofNullable(this.maxHotBuckets);
}
/**
* Maximum life, in seconds, of a hot bucket. Defaults to 0. If a hot bucket exceeds maxHotIdleSecs, Splunk software rolls it to warm. This setting operates independently of maxHotBuckets, which can also cause hot buckets to roll. A value of 0 turns off the idle check (equivalent to INFINITE idle time).
*
*/
@Import(name="maxHotIdleSecs")
private @Nullable Output maxHotIdleSecs;
/**
* @return Maximum life, in seconds, of a hot bucket. Defaults to 0. If a hot bucket exceeds maxHotIdleSecs, Splunk software rolls it to warm. This setting operates independently of maxHotBuckets, which can also cause hot buckets to roll. A value of 0 turns off the idle check (equivalent to INFINITE idle time).
*
*/
public Optional> maxHotIdleSecs() {
return Optional.ofNullable(this.maxHotIdleSecs);
}
/**
* Upper bound of target maximum timespan of hot/warm buckets in seconds. Defaults to 7776000 seconds (90 days).
*
*/
@Import(name="maxHotSpanSecs")
private @Nullable Output maxHotSpanSecs;
/**
* @return Upper bound of target maximum timespan of hot/warm buckets in seconds. Defaults to 7776000 seconds (90 days).
*
*/
public Optional> maxHotSpanSecs() {
return Optional.ofNullable(this.maxHotSpanSecs);
}
/**
* The amount of memory, expressed in MB, to allocate for buffering a single tsidx file into memory before flushing to disk. Defaults to 5. The default is recommended for all environments.
*
*/
@Import(name="maxMemMb")
private @Nullable Output maxMemMb;
/**
* @return The amount of memory, expressed in MB, to allocate for buffering a single tsidx file into memory before flushing to disk. Defaults to 5. The default is recommended for all environments.
*
*/
public Optional> maxMemMb() {
return Optional.ofNullable(this.maxMemMb);
}
/**
* Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored. If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies. Highest legal value is 2147483647. To disable this parameter, set to 0.
*
*/
@Import(name="maxMetaEntries")
private @Nullable Output maxMetaEntries;
/**
* @return Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored. If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies. Highest legal value is 2147483647. To disable this parameter, set to 0.
*
*/
public Optional> maxMetaEntries() {
return Optional.ofNullable(this.maxMetaEntries);
}
/**
* Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored.
* If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies.
* Highest legal value is 2147483647. To disable this parameter, set to 0.
*
*/
@Import(name="maxTimeUnreplicatedNoAcks")
private @Nullable Output maxTimeUnreplicatedNoAcks;
/**
* @return Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored.
* If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies.
* Highest legal value is 2147483647. To disable this parameter, set to 0.
*
*/
public Optional> maxTimeUnreplicatedNoAcks() {
return Optional.ofNullable(this.maxTimeUnreplicatedNoAcks);
}
/**
* Upper limit, in seconds, on how long events can sit unacknowledged in a raw slice. Applies only if you have enabled acks on forwarders and have replication enabled (with clustering).
* Note: This is an advanced parameter. Make sure you understand the settings on all forwarders before changing this. This number should not exceed ack timeout configured on any forwarder, and should actually be set to at most half of the minimum value of that timeout. You can find this setting in outputs.conf readTimeout setting under the tcpout stanza.
* To disable, set to 0, but this is NOT recommended. Highest legal value is 2147483647.
*
*/
@Import(name="maxTimeUnreplicatedWithAcks")
private @Nullable Output maxTimeUnreplicatedWithAcks;
/**
* @return Upper limit, in seconds, on how long events can sit unacknowledged in a raw slice. Applies only if you have enabled acks on forwarders and have replication enabled (with clustering).
* Note: This is an advanced parameter. Make sure you understand the settings on all forwarders before changing this. This number should not exceed ack timeout configured on any forwarder, and should actually be set to at most half of the minimum value of that timeout. You can find this setting in outputs.conf readTimeout setting under the tcpout stanza.
* To disable, set to 0, but this is NOT recommended. Highest legal value is 2147483647.
*
*/
public Optional> maxTimeUnreplicatedWithAcks() {
return Optional.ofNullable(this.maxTimeUnreplicatedWithAcks);
}
/**
* The maximum size of an index (in MB). If an index grows larger than the maximum size, the oldest data is frozen.
*
*/
@Import(name="maxTotalDataSizeMb")
private @Nullable Output maxTotalDataSizeMb;
/**
* @return The maximum size of an index (in MB). If an index grows larger than the maximum size, the oldest data is frozen.
*
*/
public Optional> maxTotalDataSizeMb() {
return Optional.ofNullable(this.maxTotalDataSizeMb);
}
/**
* The maximum number of warm buckets. If this number is exceeded, the warm bucket/s with the lowest value for their latest times is moved to cold.
*
*/
@Import(name="maxWarmDbCount")
private @Nullable Output maxWarmDbCount;
/**
* @return The maximum number of warm buckets. If this number is exceeded, the warm bucket/s with the lowest value for their latest times is moved to cold.
*
*/
public Optional> maxWarmDbCount() {
return Optional.ofNullable(this.maxWarmDbCount);
}
/**
* Specify an integer (or "disable") for this parameter.
* This parameter sets how frequently splunkd forces a filesystem sync while compressing journal slices.
* During this period, uncompressed slices are left on disk even after they are compressed. Then splunkd forces a filesystem sync of the compressed journal and removes the accumulated uncompressed files.
* If 0 is specified, splunkd forces a filesystem sync after every slice completes compressing. Specifying "disable" disables syncing entirely: uncompressed slices are removed as soon as compression is complete.
*
*/
@Import(name="minRawFileSyncSecs")
private @Nullable Output minRawFileSyncSecs;
/**
* @return Specify an integer (or "disable") for this parameter.
* This parameter sets how frequently splunkd forces a filesystem sync while compressing journal slices.
* During this period, uncompressed slices are left on disk even after they are compressed. Then splunkd forces a filesystem sync of the compressed journal and removes the accumulated uncompressed files.
* If 0 is specified, splunkd forces a filesystem sync after every slice completes compressing. Specifying "disable" disables syncing entirely: uncompressed slices are removed as soon as compression is complete.
*
*/
public Optional> minRawFileSyncSecs() {
return Optional.ofNullable(this.minRawFileSyncSecs);
}
/**
* Minimum size of the queue that stores events in memory before committing them to a tsidx file.
*
*/
@Import(name="minStreamGroupQueueSize")
private @Nullable Output minStreamGroupQueueSize;
/**
* @return Minimum size of the queue that stores events in memory before committing them to a tsidx file.
*
*/
public Optional> minStreamGroupQueueSize() {
return Optional.ofNullable(this.minStreamGroupQueueSize);
}
/**
* The name of the index to create.
*
*/
@Import(name="name")
private @Nullable Output name;
/**
* @return The name of the index to create.
*
*/
public Optional> name() {
return Optional.ofNullable(this.name);
}
/**
* Related to serviceMetaPeriod. If set, it enables metadata sync every <integer> seconds, but only for records where the sync can be done efficiently in-place, without requiring a full re-write of the metadata file. Records that require full re-write are be sync'ed at serviceMetaPeriod.
* partialServiceMetaPeriod specifies, in seconds, how frequently it should sync. Zero means that this feature is turned off and serviceMetaPeriod is the only time when metadata sync happens.
* If the value of partialServiceMetaPeriod is greater than serviceMetaPeriod, this setting has no effect.
* By default it is turned off (zero).
*
*/
@Import(name="partialServiceMetaPeriod")
private @Nullable Output partialServiceMetaPeriod;
/**
* @return Related to serviceMetaPeriod. If set, it enables metadata sync every <integer> seconds, but only for records where the sync can be done efficiently in-place, without requiring a full re-write of the metadata file. Records that require full re-write are be sync'ed at serviceMetaPeriod.
* partialServiceMetaPeriod specifies, in seconds, how frequently it should sync. Zero means that this feature is turned off and serviceMetaPeriod is the only time when metadata sync happens.
* If the value of partialServiceMetaPeriod is greater than serviceMetaPeriod, this setting has no effect.
* By default it is turned off (zero).
*
*/
public Optional> partialServiceMetaPeriod() {
return Optional.ofNullable(this.partialServiceMetaPeriod);
}
/**
* Specifies, in seconds, how often the indexer checks the status of the child OS processes it launched to see if it can launch new processes for queued requests. Defaults to 15.
* If set to 0, the indexer checks child process status every second.
* Highest legal value is 4294967295.
*
*/
@Import(name="processTrackerServiceInterval")
private @Nullable Output processTrackerServiceInterval;
/**
* @return Specifies, in seconds, how often the indexer checks the status of the child OS processes it launched to see if it can launch new processes for queued requests. Defaults to 15.
* If set to 0, the indexer checks child process status every second.
* Highest legal value is 4294967295.
*
*/
public Optional> processTrackerServiceInterval() {
return Optional.ofNullable(this.processTrackerServiceInterval);
}
/**
* Events with timestamp of quarantineFutureSecs newer than "now" are dropped into quarantine bucket. Defaults to 2592000 (30 days).
* This is a mechanism to prevent main hot buckets from being polluted with fringe events.
*
*/
@Import(name="quarantineFutureSecs")
private @Nullable Output quarantineFutureSecs;
/**
* @return Events with timestamp of quarantineFutureSecs newer than "now" are dropped into quarantine bucket. Defaults to 2592000 (30 days).
* This is a mechanism to prevent main hot buckets from being polluted with fringe events.
*
*/
public Optional> quarantineFutureSecs() {
return Optional.ofNullable(this.quarantineFutureSecs);
}
/**
* Events with timestamp of quarantinePastSecs older than "now" are dropped into quarantine bucket. Defaults to 77760000 (900 days). This is a mechanism to prevent the main hot buckets from being polluted with fringe events.
*
*/
@Import(name="quarantinePastSecs")
private @Nullable Output quarantinePastSecs;
/**
* @return Events with timestamp of quarantinePastSecs older than "now" are dropped into quarantine bucket. Defaults to 77760000 (900 days). This is a mechanism to prevent the main hot buckets from being polluted with fringe events.
*
*/
public Optional> quarantinePastSecs() {
return Optional.ofNullable(this.quarantinePastSecs);
}
/**
* Target uncompressed size in bytes for individual raw slice in the rawdata journal of the index. Defaults to 131072 (128KB). 0 is not a valid value. If 0 is specified, rawChunkSizeBytes is set to the default value.
*
*/
@Import(name="rawChunkSizeBytes")
private @Nullable Output rawChunkSizeBytes;
/**
* @return Target uncompressed size in bytes for individual raw slice in the rawdata journal of the index. Defaults to 131072 (128KB). 0 is not a valid value. If 0 is specified, rawChunkSizeBytes is set to the default value.
*
*/
public Optional> rawChunkSizeBytes() {
return Optional.ofNullable(this.rawChunkSizeBytes);
}
/**
* Index replication control. This parameter applies to only clustering slaves.
* auto = Use the master index replication configuration value.
* 0 = Turn off replication for this index.
*
*/
@Import(name="repFactor")
private @Nullable Output repFactor;
/**
* @return Index replication control. This parameter applies to only clustering slaves.
* auto = Use the master index replication configuration value.
* 0 = Turn off replication for this index.
*
*/
public Optional> repFactor() {
return Optional.ofNullable(this.repFactor);
}
/**
* How frequently (in seconds) to check if a new hot bucket needs to be created. Also, how frequently to check if there are any warm/cold buckets that should be rolled/frozen.
*
*/
@Import(name="rotatePeriodInSecs")
private @Nullable Output rotatePeriodInSecs;
/**
* @return How frequently (in seconds) to check if a new hot bucket needs to be created. Also, how frequently to check if there are any warm/cold buckets that should be rolled/frozen.
*
*/
public Optional> rotatePeriodInSecs() {
return Optional.ofNullable(this.rotatePeriodInSecs);
}
/**
* Defines how frequently metadata is synced to disk, in seconds. Defaults to 25 (seconds).
* You may want to set this to a higher value if the sum of your metadata file sizes is larger than many tens of megabytes, to avoid the hit on I/O in the indexing fast path.
*
*/
@Import(name="serviceMetaPeriod")
private @Nullable Output serviceMetaPeriod;
/**
* @return Defines how frequently metadata is synced to disk, in seconds. Defaults to 25 (seconds).
* You may want to set this to a higher value if the sum of your metadata file sizes is larger than many tens of megabytes, to avoid the hit on I/O in the indexing fast path.
*
*/
public Optional> serviceMetaPeriod() {
return Optional.ofNullable(this.serviceMetaPeriod);
}
/**
* When true, a sync operation is called before file descriptor is closed on metadata file updates. This functionality improves integrity of metadata files, especially in regards to operating system crashes/machine failures.
*
*/
@Import(name="syncMeta")
private @Nullable Output syncMeta;
/**
* @return When true, a sync operation is called before file descriptor is closed on metadata file updates. This functionality improves integrity of metadata files, especially in regards to operating system crashes/machine failures.
*
*/
public Optional> syncMeta() {
return Optional.ofNullable(this.syncMeta);
}
/**
* An absolute path that contains the thawed (resurrected) databases for the index.
* Cannot be defined in terms of a volume definition.
* Required. Splunk software does not start if an index lacks a valid thawedPath.
*
*/
@Import(name="thawedPath")
private @Nullable Output thawedPath;
/**
* @return An absolute path that contains the thawed (resurrected) databases for the index.
* Cannot be defined in terms of a volume definition.
* Required. Splunk software does not start if an index lacks a valid thawedPath.
*
*/
public Optional> thawedPath() {
return Optional.ofNullable(this.thawedPath);
}
/**
* Defines how frequently Splunk software checks for index throttling condition, in seconds. Defaults to 15 (seconds).
*
*/
@Import(name="throttleCheckPeriod")
private @Nullable Output throttleCheckPeriod;
/**
* @return Defines how frequently Splunk software checks for index throttling condition, in seconds. Defaults to 15 (seconds).
*
*/
public Optional> throttleCheckPeriod() {
return Optional.ofNullable(this.throttleCheckPeriod);
}
/**
* Location to store datamodel acceleration TSIDX data for this index. Restart splunkd after changing this parameter.
* If specified, it must be defined in terms of a volume definition.
*
*/
@Import(name="tstatsHomePath")
private @Nullable Output tstatsHomePath;
/**
* @return Location to store datamodel acceleration TSIDX data for this index. Restart splunkd after changing this parameter.
* If specified, it must be defined in terms of a volume definition.
*
*/
public Optional> tstatsHomePath() {
return Optional.ofNullable(this.tstatsHomePath);
}
/**
* Path to a script to run when moving data from warm to cold.
* This attribute is supported for backwards compatibility with Splunk software versions older than 4.0. Contact Splunk support if you need help configuring this setting.
*
*/
@Import(name="warmToColdScript")
private @Nullable Output warmToColdScript;
/**
* @return Path to a script to run when moving data from warm to cold.
* This attribute is supported for backwards compatibility with Splunk software versions older than 4.0. Contact Splunk support if you need help configuring this setting.
*
*/
public Optional> warmToColdScript() {
return Optional.ofNullable(this.warmToColdScript);
}
// No-arg constructor: all fields stay null; exposed only through Empty and the Builder.
private IndexesArgs() {}

// Copy constructor: duplicates every field from an existing instance so the
// Builder can start from a set of defaults without mutating the original.
private IndexesArgs(IndexesArgs other) {
    this.acl = other.acl;
    this.blockSignSize = other.blockSignSize;
    this.bucketRebuildMemoryHint = other.bucketRebuildMemoryHint;
    this.coldPath = other.coldPath;
    this.coldToFrozenDir = other.coldToFrozenDir;
    this.coldToFrozenScript = other.coldToFrozenScript;
    this.compressRawdata = other.compressRawdata;
    this.datatype = other.datatype;
    this.enableOnlineBucketRepair = other.enableOnlineBucketRepair;
    this.frozenTimePeriodInSecs = other.frozenTimePeriodInSecs;
    this.homePath = other.homePath;
    this.maxBloomBackfillBucketAge = other.maxBloomBackfillBucketAge;
    this.maxConcurrentOptimizes = other.maxConcurrentOptimizes;
    this.maxDataSize = other.maxDataSize;
    this.maxHotBuckets = other.maxHotBuckets;
    this.maxHotIdleSecs = other.maxHotIdleSecs;
    this.maxHotSpanSecs = other.maxHotSpanSecs;
    this.maxMemMb = other.maxMemMb;
    this.maxMetaEntries = other.maxMetaEntries;
    this.maxTimeUnreplicatedNoAcks = other.maxTimeUnreplicatedNoAcks;
    this.maxTimeUnreplicatedWithAcks = other.maxTimeUnreplicatedWithAcks;
    this.maxTotalDataSizeMb = other.maxTotalDataSizeMb;
    this.maxWarmDbCount = other.maxWarmDbCount;
    this.minRawFileSyncSecs = other.minRawFileSyncSecs;
    this.minStreamGroupQueueSize = other.minStreamGroupQueueSize;
    this.name = other.name;
    this.partialServiceMetaPeriod = other.partialServiceMetaPeriod;
    this.processTrackerServiceInterval = other.processTrackerServiceInterval;
    this.quarantineFutureSecs = other.quarantineFutureSecs;
    this.quarantinePastSecs = other.quarantinePastSecs;
    this.rawChunkSizeBytes = other.rawChunkSizeBytes;
    this.repFactor = other.repFactor;
    this.rotatePeriodInSecs = other.rotatePeriodInSecs;
    this.serviceMetaPeriod = other.serviceMetaPeriod;
    this.syncMeta = other.syncMeta;
    this.thawedPath = other.thawedPath;
    this.throttleCheckPeriod = other.throttleCheckPeriod;
    this.tstatsHomePath = other.tstatsHomePath;
    this.warmToColdScript = other.warmToColdScript;
}
/**
 * Creates a new, empty {@link Builder}.
 */
public static Builder builder() {
return new Builder();
}
/**
 * Creates a {@link Builder} pre-populated with the values of {@code defaults}.
 */
public static Builder builder(IndexesArgs defaults) {
return new Builder(defaults);
}
public static final class Builder {
// The in-progress IndexesArgs instance mutated by the builder's setters.
private IndexesArgs $;
// Starts from an empty args instance.
public Builder() {
$ = new IndexesArgs();
}
// Starts from a defensive copy of the given defaults; rejects null.
public Builder(IndexesArgs defaults) {
$ = new IndexesArgs(Objects.requireNonNull(defaults));
}
/**
 * @param acl The app/user context that is the namespace for the resource
 *
 * @return builder
 *
 */
public Builder acl(@Nullable Output<IndexesAclArgs> acl) {
    $.acl = acl;
    return this;
}

/**
 * @param acl The app/user context that is the namespace for the resource
 *
 * @return builder
 *
 */
public Builder acl(IndexesAclArgs acl) {
    return acl(Output.of(acl));
}
/**
 * @param blockSignSize Controls how many events make up a block for block signatures. If this is set to 0, block signing is disabled for this index. <br>A recommended value is 100.
 *
 * @return builder
 *
 */
public Builder blockSignSize(@Nullable Output<Integer> blockSignSize) {
    $.blockSignSize = blockSignSize;
    return this;
}

/**
 * @param blockSignSize Controls how many events make up a block for block signatures. If this is set to 0, block signing is disabled for this index. <br>A recommended value is 100.
 *
 * @return builder
 *
 */
public Builder blockSignSize(Integer blockSignSize) {
    return blockSignSize(Output.of(blockSignSize));
}
/**
* @param bucketRebuildMemoryHint Suggestion for the bucket rebuild process for the size of the time-series (tsidx) file to make.
* <be>Caution: This is an advanced parameter. Inappropriate use of this parameter causes splunkd to not start if rebuild is required. Do not set this parameter unless instructed by Splunk Support.
* Default value, auto, varies by the amount of physical RAM on the host<br>
* less than 2GB RAM = 67108864 (64MB) tsidx
* 2GB to 8GB RAM = 134217728 (128MB) tsidx
* more than 8GB RAM = 268435456 (256MB) tsidx<br>
* Values other than "auto" must be 16MB-1GB. Highest legal value (of the numerical part) is 4294967295 You can specify the value using a size suffix: "16777216" or "16MB" are equivalent.
*
* @return builder
*
*/
public Builder bucketRebuildMemoryHint(@Nullable Output bucketRebuildMemoryHint) {
$.bucketRebuildMemoryHint = bucketRebuildMemoryHint;
return this;
}
/**
* @param bucketRebuildMemoryHint Suggestion for the bucket rebuild process for the size of the time-series (tsidx) file to make.
* <be>Caution: This is an advanced parameter. Inappropriate use of this parameter causes splunkd to not start if rebuild is required. Do not set this parameter unless instructed by Splunk Support.
* Default value, auto, varies by the amount of physical RAM on the host<br>
* less than 2GB RAM = 67108864 (64MB) tsidx
* 2GB to 8GB RAM = 134217728 (128MB) tsidx
* more than 8GB RAM = 268435456 (256MB) tsidx<br>
* Values other than "auto" must be 16MB-1GB. Highest legal value (of the numerical part) is 4294967295 You can specify the value using a size suffix: "16777216" or "16MB" are equivalent.
*
* @return builder
*
*/
public Builder bucketRebuildMemoryHint(String bucketRebuildMemoryHint) {
return bucketRebuildMemoryHint(Output.of(bucketRebuildMemoryHint));
}
/**
* @param coldPath An absolute path that contains the colddbs for the index. The path must be readable and writable. Cold databases are opened as needed when searching.
*
* @return builder
*
*/
public Builder coldPath(@Nullable Output coldPath) {
$.coldPath = coldPath;
return this;
}
/**
* @param coldPath An absolute path that contains the colddbs for the index. The path must be readable and writable. Cold databases are opened as needed when searching.
*
* @return builder
*
*/
public Builder coldPath(String coldPath) {
return coldPath(Output.of(coldPath));
}
/**
* @param coldToFrozenDir Destination path for the frozen archive. Use as an alternative to a coldToFrozenScript. Splunk software automatically puts frozen buckets in this directory.
* <br>
* Bucket freezing policy is as follows:<br>
* New style buckets (4.2 and on): removes all files but the rawdata<br>
* To thaw, run splunk rebuild <bucket dir> on the bucket, then move to the thawed directory<br>
* Old style buckets (Pre-4.2): gzip all the .data and .tsidx files<br>
* To thaw, gunzip the zipped files and move the bucket into the thawed directory<br>
* If both coldToFrozenDir and coldToFrozenScript are specified, coldToFrozenDir takes precedence
*
* @return builder
*
*/
public Builder coldToFrozenDir(@Nullable Output coldToFrozenDir) {
$.coldToFrozenDir = coldToFrozenDir;
return this;
}
/**
* @param coldToFrozenDir Destination path for the frozen archive. Use as an alternative to a coldToFrozenScript. Splunk software automatically puts frozen buckets in this directory.
* <br>
* Bucket freezing policy is as follows:<br>
* New style buckets (4.2 and on): removes all files but the rawdata<br>
* To thaw, run splunk rebuild <bucket dir> on the bucket, then move to the thawed directory<br>
* Old style buckets (Pre-4.2): gzip all the .data and .tsidx files<br>
* To thaw, gunzip the zipped files and move the bucket into the thawed directory<br>
* If both coldToFrozenDir and coldToFrozenScript are specified, coldToFrozenDir takes precedence
*
* @return builder
*
*/
public Builder coldToFrozenDir(String coldToFrozenDir) {
return coldToFrozenDir(Output.of(coldToFrozenDir));
}
/**
* @param coldToFrozenScript Path to the archiving script.
* <br>If your script requires a program to run it (for example, python), specify the program followed by the path. The script must be in $SPLUNK_HOME/bin or one of its subdirectories.
* <br>Splunk software ships with an example archiving script in $SPLUNK_HOME/bin called coldToFrozenExample.py. DO NOT use this example script directly. It uses a default path, and if modified in place any changes are overwritten on upgrade.
* <br>It is best to copy the example script to a new file in bin and modify it for your system. Most importantly, change the default archive path to an existing directory that fits your needs.
*
* @return builder
*
*/
public Builder coldToFrozenScript(@Nullable Output coldToFrozenScript) {
$.coldToFrozenScript = coldToFrozenScript;
return this;
}
/**
* @param coldToFrozenScript Path to the archiving script.
* <br>If your script requires a program to run it (for example, python), specify the program followed by the path. The script must be in $SPLUNK_HOME/bin or one of its subdirectories.
* <br>Splunk software ships with an example archiving script in $SPLUNK_HOME/bin called coldToFrozenExample.py. DO NOT use this example script directly. It uses a default path, and if modified in place any changes are overwritten on upgrade.
* <br>It is best to copy the example script to a new file in bin and modify it for your system. Most importantly, change the default archive path to an existing directory that fits your needs.
*
* @return builder
*
*/
public Builder coldToFrozenScript(String coldToFrozenScript) {
return coldToFrozenScript(Output.of(coldToFrozenScript));
}
/**
* @param compressRawdata This parameter is ignored. The splunkd process always compresses raw data.
*
* @return builder
*
*/
public Builder compressRawdata(@Nullable Output compressRawdata) {
$.compressRawdata = compressRawdata;
return this;
}
/**
* @param compressRawdata This parameter is ignored. The splunkd process always compresses raw data.
*
* @return builder
*
*/
public Builder compressRawdata(Boolean compressRawdata) {
return compressRawdata(Output.of(compressRawdata));
}
/**
* @param datatype Valid values: (event | metric). Specifies the type of index.
*
* @return builder
*
*/
public Builder datatype(@Nullable Output datatype) {
$.datatype = datatype;
return this;
}
/**
* @param datatype Valid values: (event | metric). Specifies the type of index.
*
* @return builder
*
*/
public Builder datatype(String datatype) {
return datatype(Output.of(datatype));
}
/**
* @param enableOnlineBucketRepair Enables asynchronous "online fsck" bucket repair, which runs concurrently with Splunk software.
* When enabled, you do not have to wait until buckets are repaired to start the Splunk platform. However, you might observe a slight performance degratation.
*
* @return builder
*
*/
public Builder enableOnlineBucketRepair(@Nullable Output enableOnlineBucketRepair) {
$.enableOnlineBucketRepair = enableOnlineBucketRepair;
return this;
}
/**
* @param enableOnlineBucketRepair Enables asynchronous "online fsck" bucket repair, which runs concurrently with Splunk software.
* When enabled, you do not have to wait until buckets are repaired to start the Splunk platform. However, you might observe a slight performance degratation.
*
* @return builder
*
*/
public Builder enableOnlineBucketRepair(Boolean enableOnlineBucketRepair) {
return enableOnlineBucketRepair(Output.of(enableOnlineBucketRepair));
}
/**
* @param frozenTimePeriodInSecs Number of seconds after which indexed data rolls to frozen.
* Defaults to 188697600 (6 years).Freezing data means it is removed from the index. If you need to archive your data, refer to coldToFrozenDir and coldToFrozenScript parameter documentation.
*
* @return builder
*
*/
public Builder frozenTimePeriodInSecs(@Nullable Output frozenTimePeriodInSecs) {
$.frozenTimePeriodInSecs = frozenTimePeriodInSecs;
return this;
}
/**
* @param frozenTimePeriodInSecs Number of seconds after which indexed data rolls to frozen.
* Defaults to 188697600 (6 years).Freezing data means it is removed from the index. If you need to archive your data, refer to coldToFrozenDir and coldToFrozenScript parameter documentation.
*
* @return builder
*
*/
public Builder frozenTimePeriodInSecs(Integer frozenTimePeriodInSecs) {
return frozenTimePeriodInSecs(Output.of(frozenTimePeriodInSecs));
}
/**
* @param homePath An absolute path that contains the hot and warm buckets for the index.
* Required. Splunk software does not start if an index lacks a valid homePath.
* <br>Caution: The path must be readable and writable.
*
* @return builder
*
*/
public Builder homePath(@Nullable Output homePath) {
$.homePath = homePath;
return this;
}
/**
* @param homePath An absolute path that contains the hot and warm buckets for the index.
* Required. Splunk software does not start if an index lacks a valid homePath.
* <br>Caution: The path must be readable and writable.
*
* @return builder
*
*/
public Builder homePath(String homePath) {
return homePath(Output.of(homePath));
}
/**
* @param maxBloomBackfillBucketAge Valid values are: Integer[m|s|h|d].
* <br>If a warm or cold bucket is older than the specified age, do not create or rebuild its bloomfilter. Specify 0 to never rebuild bloomfilters.
*
* @return builder
*
*/
public Builder maxBloomBackfillBucketAge(@Nullable Output maxBloomBackfillBucketAge) {
$.maxBloomBackfillBucketAge = maxBloomBackfillBucketAge;
return this;
}
/**
* @param maxBloomBackfillBucketAge Valid values are: Integer[m|s|h|d].
* <br>If a warm or cold bucket is older than the specified age, do not create or rebuild its bloomfilter. Specify 0 to never rebuild bloomfilters.
*
* @return builder
*
*/
public Builder maxBloomBackfillBucketAge(String maxBloomBackfillBucketAge) {
return maxBloomBackfillBucketAge(Output.of(maxBloomBackfillBucketAge));
}
/**
* @param maxConcurrentOptimizes The number of concurrent optimize processes that can run against a hot bucket.
* This number should be increased if instructed by Splunk Support. Typically the default value should suffice.
*
* @return builder
*
*/
public Builder maxConcurrentOptimizes(@Nullable Output maxConcurrentOptimizes) {
$.maxConcurrentOptimizes = maxConcurrentOptimizes;
return this;
}
/**
* @param maxConcurrentOptimizes The number of concurrent optimize processes that can run against a hot bucket.
* This number should be increased if instructed by Splunk Support. Typically the default value should suffice.
*
* @return builder
*
*/
public Builder maxConcurrentOptimizes(Integer maxConcurrentOptimizes) {
return maxConcurrentOptimizes(Output.of(maxConcurrentOptimizes));
}
/**
* @param maxDataSize The maximum size in MB for a hot DB to reach before a roll to warm is triggered. Specifying "auto" or "auto_high_volume" causes Splunk software to autotune this parameter (recommended).
* Use "auto_high_volume" for high volume indexes (such as the main index); otherwise, use "auto". A "high volume index" would typically be considered one that gets over 10GB of data per day.
*
* @return builder
*
*/
public Builder maxDataSize(@Nullable Output maxDataSize) {
$.maxDataSize = maxDataSize;
return this;
}
/**
* @param maxDataSize The maximum size in MB for a hot DB to reach before a roll to warm is triggered. Specifying "auto" or "auto_high_volume" causes Splunk software to autotune this parameter (recommended).
* Use "auto_high_volume" for high volume indexes (such as the main index); otherwise, use "auto". A "high volume index" would typically be considered one that gets over 10GB of data per day.
*
* @return builder
*
*/
public Builder maxDataSize(String maxDataSize) {
return maxDataSize(Output.of(maxDataSize));
}
/**
* @param maxHotBuckets Maximum hot buckets that can exist per index. Defaults to 3.
* <br>When maxHotBuckets is exceeded, Splunk software rolls the least recently used (LRU) hot bucket to warm. Both normal hot buckets and quarantined hot buckets count towards this total. This setting operates independently of maxHotIdleSecs, which can also cause hot buckets to roll.
*
* @return builder
*
*/
public Builder maxHotBuckets(@Nullable Output maxHotBuckets) {
$.maxHotBuckets = maxHotBuckets;
return this;
}
/**
* @param maxHotBuckets Maximum hot buckets that can exist per index. Defaults to 3.
* <br>When maxHotBuckets is exceeded, Splunk software rolls the least recently used (LRU) hot bucket to warm. Both normal hot buckets and quarantined hot buckets count towards this total. This setting operates independently of maxHotIdleSecs, which can also cause hot buckets to roll.
*
* @return builder
*
*/
public Builder maxHotBuckets(Integer maxHotBuckets) {
return maxHotBuckets(Output.of(maxHotBuckets));
}
/**
* @param maxHotIdleSecs Maximum life, in seconds, of a hot bucket. Defaults to 0. If a hot bucket exceeds maxHotIdleSecs, Splunk software rolls it to warm. This setting operates independently of maxHotBuckets, which can also cause hot buckets to roll. A value of 0 turns off the idle check (equivalent to INFINITE idle time).
*
* @return builder
*
*/
public Builder maxHotIdleSecs(@Nullable Output maxHotIdleSecs) {
$.maxHotIdleSecs = maxHotIdleSecs;
return this;
}
/**
* @param maxHotIdleSecs Maximum life, in seconds, of a hot bucket. Defaults to 0. If a hot bucket exceeds maxHotIdleSecs, Splunk software rolls it to warm. This setting operates independently of maxHotBuckets, which can also cause hot buckets to roll. A value of 0 turns off the idle check (equivalent to INFINITE idle time).
*
* @return builder
*
*/
public Builder maxHotIdleSecs(Integer maxHotIdleSecs) {
return maxHotIdleSecs(Output.of(maxHotIdleSecs));
}
/**
* @param maxHotSpanSecs Upper bound of target maximum timespan of hot/warm buckets in seconds. Defaults to 7776000 seconds (90 days).
*
* @return builder
*
*/
public Builder maxHotSpanSecs(@Nullable Output maxHotSpanSecs) {
$.maxHotSpanSecs = maxHotSpanSecs;
return this;
}
/**
* @param maxHotSpanSecs Upper bound of target maximum timespan of hot/warm buckets in seconds. Defaults to 7776000 seconds (90 days).
*
* @return builder
*
*/
public Builder maxHotSpanSecs(Integer maxHotSpanSecs) {
return maxHotSpanSecs(Output.of(maxHotSpanSecs));
}
/**
* @param maxMemMb The amount of memory, expressed in MB, to allocate for buffering a single tsidx file into memory before flushing to disk. Defaults to 5. The default is recommended for all environments.
*
* @return builder
*
*/
public Builder maxMemMb(@Nullable Output maxMemMb) {
$.maxMemMb = maxMemMb;
return this;
}
/**
* @param maxMemMb The amount of memory, expressed in MB, to allocate for buffering a single tsidx file into memory before flushing to disk. Defaults to 5. The default is recommended for all environments.
*
* @return builder
*
*/
public Builder maxMemMb(Integer maxMemMb) {
return maxMemMb(Output.of(maxMemMb));
}
/**
* @param maxMetaEntries Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored. If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies. Highest legal value is 2147483647. To disable this parameter, set to 0.
*
* @return builder
*
*/
public Builder maxMetaEntries(@Nullable Output maxMetaEntries) {
$.maxMetaEntries = maxMetaEntries;
return this;
}
/**
* @param maxMetaEntries Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored. If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies. Highest legal value is 2147483647. To disable this parameter, set to 0.
*
* @return builder
*
*/
public Builder maxMetaEntries(Integer maxMetaEntries) {
return maxMetaEntries(Output.of(maxMetaEntries));
}
/**
* @param maxTimeUnreplicatedNoAcks Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored.
* If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies.
* Highest legal value is 2147483647. To disable this parameter, set to 0.
*
* @return builder
*
*/
public Builder maxTimeUnreplicatedNoAcks(@Nullable Output maxTimeUnreplicatedNoAcks) {
$.maxTimeUnreplicatedNoAcks = maxTimeUnreplicatedNoAcks;
return this;
}
/**
* @param maxTimeUnreplicatedNoAcks Upper limit, in seconds, on how long an event can sit in raw slice. Applies only if replication is enabled for this index. Otherwise ignored.
* If there are any acknowledged events sharing this raw slice, this paramater does not apply. In this case, maxTimeUnreplicatedWithAcks applies.
* Highest legal value is 2147483647. To disable this parameter, set to 0.
*
* @return builder
*
*/
public Builder maxTimeUnreplicatedNoAcks(Integer maxTimeUnreplicatedNoAcks) {
return maxTimeUnreplicatedNoAcks(Output.of(maxTimeUnreplicatedNoAcks));
}
/**
* @param maxTimeUnreplicatedWithAcks Upper limit, in seconds, on how long events can sit unacknowledged in a raw slice. Applies only if you have enabled acks on forwarders and have replication enabled (with clustering).
* Note: This is an advanced parameter. Make sure you understand the settings on all forwarders before changing this. This number should not exceed ack timeout configured on any forwarder, and should actually be set to at most half of the minimum value of that timeout. You can find this setting in outputs.conf readTimeout setting under the tcpout stanza.
* To disable, set to 0, but this is NOT recommended. Highest legal value is 2147483647.
*
* @return builder
*
*/
public Builder maxTimeUnreplicatedWithAcks(@Nullable Output maxTimeUnreplicatedWithAcks) {
$.maxTimeUnreplicatedWithAcks = maxTimeUnreplicatedWithAcks;
return this;
}
/**
* @param maxTimeUnreplicatedWithAcks Upper limit, in seconds, on how long events can sit unacknowledged in a raw slice. Applies only if you have enabled acks on forwarders and have replication enabled (with clustering).
* Note: This is an advanced parameter. Make sure you understand the settings on all forwarders before changing this. This number should not exceed ack timeout configured on any forwarder, and should actually be set to at most half of the minimum value of that timeout. You can find this setting in outputs.conf readTimeout setting under the tcpout stanza.
* To disable, set to 0, but this is NOT recommended. Highest legal value is 2147483647.
*
* @return builder
*
*/
public Builder maxTimeUnreplicatedWithAcks(Integer maxTimeUnreplicatedWithAcks) {
return maxTimeUnreplicatedWithAcks(Output.of(maxTimeUnreplicatedWithAcks));
}
/**
* @param maxTotalDataSizeMb The maximum size of an index (in MB). If an index grows larger than the maximum size, the oldest data is frozen.
*
* @return builder
*
*/
public Builder maxTotalDataSizeMb(@Nullable Output maxTotalDataSizeMb) {
$.maxTotalDataSizeMb = maxTotalDataSizeMb;
return this;
}
/**
* @param maxTotalDataSizeMb The maximum size of an index (in MB). If an index grows larger than the maximum size, the oldest data is frozen.
*
* @return builder
*
*/
public Builder maxTotalDataSizeMb(Integer maxTotalDataSizeMb) {
return maxTotalDataSizeMb(Output.of(maxTotalDataSizeMb));
}
/**
* @param maxWarmDbCount The maximum number of warm buckets. If this number is exceeded, the warm bucket/s with the lowest value for their latest times is moved to cold.
*
* @return builder
*
*/
public Builder maxWarmDbCount(@Nullable Output maxWarmDbCount) {
$.maxWarmDbCount = maxWarmDbCount;
return this;
}
/**
* @param maxWarmDbCount The maximum number of warm buckets. If this number is exceeded, the warm bucket/s with the lowest value for their latest times is moved to cold.
*
* @return builder
*
*/
public Builder maxWarmDbCount(Integer maxWarmDbCount) {
return maxWarmDbCount(Output.of(maxWarmDbCount));
}
/**
* @param minRawFileSyncSecs Specify an integer (or "disable") for this parameter.
* This parameter sets how frequently splunkd forces a filesystem sync while compressing journal slices.
* During this period, uncompressed slices are left on disk even after they are compressed. Then splunkd forces a filesystem sync of the compressed journal and removes the accumulated uncompressed files.
* If 0 is specified, splunkd forces a filesystem sync after every slice completes compressing. Specifying "disable" disables syncing entirely: uncompressed slices are removed as soon as compression is complete.
*
* @return builder
*
*/
public Builder minRawFileSyncSecs(@Nullable Output minRawFileSyncSecs) {
$.minRawFileSyncSecs = minRawFileSyncSecs;
return this;
}
/**
* @param minRawFileSyncSecs Specify an integer (or "disable") for this parameter.
* This parameter sets how frequently splunkd forces a filesystem sync while compressing journal slices.
* During this period, uncompressed slices are left on disk even after they are compressed. Then splunkd forces a filesystem sync of the compressed journal and removes the accumulated uncompressed files.
* If 0 is specified, splunkd forces a filesystem sync after every slice completes compressing. Specifying "disable" disables syncing entirely: uncompressed slices are removed as soon as compression is complete.
*
* @return builder
*
*/
public Builder minRawFileSyncSecs(String minRawFileSyncSecs) {
return minRawFileSyncSecs(Output.of(minRawFileSyncSecs));
}
/**
* @param minStreamGroupQueueSize Minimum size of the queue that stores events in memory before committing them to a tsidx file.
*
* @return builder
*
*/
public Builder minStreamGroupQueueSize(@Nullable Output minStreamGroupQueueSize) {
$.minStreamGroupQueueSize = minStreamGroupQueueSize;
return this;
}
/**
* @param minStreamGroupQueueSize Minimum size of the queue that stores events in memory before committing them to a tsidx file.
*
* @return builder
*
*/
public Builder minStreamGroupQueueSize(Integer minStreamGroupQueueSize) {
return minStreamGroupQueueSize(Output.of(minStreamGroupQueueSize));
}
/**
* @param name The name of the index to create.
*
* @return builder
*
*/
public Builder name(@Nullable Output name) {
$.name = name;
return this;
}
/**
* @param name The name of the index to create.
*
* @return builder
*
*/
public Builder name(String name) {
return name(Output.of(name));
}
/**
* @param partialServiceMetaPeriod Related to serviceMetaPeriod. If set, it enables metadata sync every <integer> seconds, but only for records where the sync can be done efficiently in-place, without requiring a full re-write of the metadata file. Records that require full re-write are be sync'ed at serviceMetaPeriod.
* partialServiceMetaPeriod specifies, in seconds, how frequently it should sync. Zero means that this feature is turned off and serviceMetaPeriod is the only time when metadata sync happens.
* If the value of partialServiceMetaPeriod is greater than serviceMetaPeriod, this setting has no effect.
* By default it is turned off (zero).
*
* @return builder
*
*/
public Builder partialServiceMetaPeriod(@Nullable Output partialServiceMetaPeriod) {
$.partialServiceMetaPeriod = partialServiceMetaPeriod;
return this;
}
/**
* @param partialServiceMetaPeriod Related to serviceMetaPeriod. If set, it enables metadata sync every <integer> seconds, but only for records where the sync can be done efficiently in-place, without requiring a full re-write of the metadata file. Records that require full re-write are be sync'ed at serviceMetaPeriod.
* partialServiceMetaPeriod specifies, in seconds, how frequently it should sync. Zero means that this feature is turned off and serviceMetaPeriod is the only time when metadata sync happens.
* If the value of partialServiceMetaPeriod is greater than serviceMetaPeriod, this setting has no effect.
* By default it is turned off (zero).
*
* @return builder
*
*/
public Builder partialServiceMetaPeriod(Integer partialServiceMetaPeriod) {
return partialServiceMetaPeriod(Output.of(partialServiceMetaPeriod));
}
/**
* @param processTrackerServiceInterval Specifies, in seconds, how often the indexer checks the status of the child OS processes it launched to see if it can launch new processes for queued requests. Defaults to 15.
* If set to 0, the indexer checks child process status every second.
* Highest legal value is 4294967295.
*
* @return builder
*
*/
public Builder processTrackerServiceInterval(@Nullable Output processTrackerServiceInterval) {
$.processTrackerServiceInterval = processTrackerServiceInterval;
return this;
}
/**
* @param processTrackerServiceInterval Specifies, in seconds, how often the indexer checks the status of the child OS processes it launched to see if it can launch new processes for queued requests. Defaults to 15.
* If set to 0, the indexer checks child process status every second.
* Highest legal value is 4294967295.
*
* @return builder
*
*/
public Builder processTrackerServiceInterval(Integer processTrackerServiceInterval) {
return processTrackerServiceInterval(Output.of(processTrackerServiceInterval));
}
/**
* @param quarantineFutureSecs Events with timestamp of quarantineFutureSecs newer than "now" are dropped into quarantine bucket. Defaults to 2592000 (30 days).
* This is a mechanism to prevent main hot buckets from being polluted with fringe events.
*
* @return builder
*
*/
public Builder quarantineFutureSecs(@Nullable Output quarantineFutureSecs) {
$.quarantineFutureSecs = quarantineFutureSecs;
return this;
}
/**
* @param quarantineFutureSecs Events with timestamp of quarantineFutureSecs newer than "now" are dropped into quarantine bucket. Defaults to 2592000 (30 days).
* This is a mechanism to prevent main hot buckets from being polluted with fringe events.
*
* @return builder
*
*/
public Builder quarantineFutureSecs(Integer quarantineFutureSecs) {
return quarantineFutureSecs(Output.of(quarantineFutureSecs));
}
/**
* @param quarantinePastSecs Events with timestamp of quarantinePastSecs older than "now" are dropped into quarantine bucket. Defaults to 77760000 (900 days). This is a mechanism to prevent the main hot buckets from being polluted with fringe events.
*
* @return builder
*
*/
public Builder quarantinePastSecs(@Nullable Output quarantinePastSecs) {
$.quarantinePastSecs = quarantinePastSecs;
return this;
}
/**
* @param quarantinePastSecs Events with timestamp of quarantinePastSecs older than "now" are dropped into quarantine bucket. Defaults to 77760000 (900 days). This is a mechanism to prevent the main hot buckets from being polluted with fringe events.
*
* @return builder
*
*/
public Builder quarantinePastSecs(Integer quarantinePastSecs) {
return quarantinePastSecs(Output.of(quarantinePastSecs));
}
/**
* @param rawChunkSizeBytes Target uncompressed size in bytes for individual raw slice in the rawdata journal of the index. Defaults to 131072 (128KB). 0 is not a valid value. If 0 is specified, rawChunkSizeBytes is set to the default value.
*
* @return builder
*
*/
public Builder rawChunkSizeBytes(@Nullable Output rawChunkSizeBytes) {
$.rawChunkSizeBytes = rawChunkSizeBytes;
return this;
}
/**
* @param rawChunkSizeBytes Target uncompressed size in bytes for individual raw slice in the rawdata journal of the index. Defaults to 131072 (128KB). 0 is not a valid value. If 0 is specified, rawChunkSizeBytes is set to the default value.
*
* @return builder
*
*/
public Builder rawChunkSizeBytes(Integer rawChunkSizeBytes) {
return rawChunkSizeBytes(Output.of(rawChunkSizeBytes));
}
/**
* @param repFactor Index replication control. This parameter applies to only clustering slaves.
* auto = Use the master index replication configuration value.
* 0 = Turn off replication for this index.
*
* @return builder
*
*/
public Builder repFactor(@Nullable Output repFactor) {
$.repFactor = repFactor;
return this;
}
/**
* @param repFactor Index replication control. This parameter applies to only clustering slaves.
* auto = Use the master index replication configuration value.
* 0 = Turn off replication for this index.
*
* @return builder
*
*/
public Builder repFactor(String repFactor) {
return repFactor(Output.of(repFactor));
}
/**
* @param rotatePeriodInSecs How frequently (in seconds) to check if a new hot bucket needs to be created. Also, how frequently to check if there are any warm/cold buckets that should be rolled/frozen.
*
* @return builder
*
*/
public Builder rotatePeriodInSecs(@Nullable Output rotatePeriodInSecs) {
$.rotatePeriodInSecs = rotatePeriodInSecs;
return this;
}
/**
* @param rotatePeriodInSecs How frequently (in seconds) to check if a new hot bucket needs to be created. Also, how frequently to check if there are any warm/cold buckets that should be rolled/frozen.
*
* @return builder
*
*/
public Builder rotatePeriodInSecs(Integer rotatePeriodInSecs) {
return rotatePeriodInSecs(Output.of(rotatePeriodInSecs));
}
/**
* @param serviceMetaPeriod Defines how frequently metadata is synced to disk, in seconds. Defaults to 25 (seconds).
* You may want to set this to a higher value if the sum of your metadata file sizes is larger than many tens of megabytes, to avoid the hit on I/O in the indexing fast path.
*
* @return builder
*
*/
public Builder serviceMetaPeriod(@Nullable Output serviceMetaPeriod) {
$.serviceMetaPeriod = serviceMetaPeriod;
return this;
}
/**
* @param serviceMetaPeriod Defines how frequently metadata is synced to disk, in seconds. Defaults to 25 (seconds).
* You may want to set this to a higher value if the sum of your metadata file sizes is larger than many tens of megabytes, to avoid the hit on I/O in the indexing fast path.
*
* @return builder
*
*/
public Builder serviceMetaPeriod(Integer serviceMetaPeriod) {
return serviceMetaPeriod(Output.of(serviceMetaPeriod));
}
/**
* @param syncMeta When true, a sync operation is called before file descriptor is closed on metadata file updates. This functionality improves integrity of metadata files, especially in regards to operating system crashes/machine failures.
*
* @return builder
*
*/
public Builder syncMeta(@Nullable Output syncMeta) {
$.syncMeta = syncMeta;
return this;
}
/**
* @param syncMeta When true, a sync operation is called before file descriptor is closed on metadata file updates. This functionality improves integrity of metadata files, especially in regards to operating system crashes/machine failures.
*
* @return builder
*
*/
public Builder syncMeta(Boolean syncMeta) {
return syncMeta(Output.of(syncMeta));
}
/**
* @param thawedPath An absolute path that contains the thawed (resurrected) databases for the index.
* Cannot be defined in terms of a volume definition.
* Required. Splunk software does not start if an index lacks a valid thawedPath.
*
* @return builder
*
*/
public Builder thawedPath(@Nullable Output thawedPath) {
$.thawedPath = thawedPath;
return this;
}
/**
* @param thawedPath An absolute path that contains the thawed (resurrected) databases for the index.
* Cannot be defined in terms of a volume definition.
* Required. Splunk software does not start if an index lacks a valid thawedPath.
*
* @return builder
*
*/
public Builder thawedPath(String thawedPath) {
return thawedPath(Output.of(thawedPath));
}
/**
* @param throttleCheckPeriod Defines how frequently Splunk software checks for index throttling condition, in seconds. Defaults to 15 (seconds).
*
* @return builder
*
*/
public Builder throttleCheckPeriod(@Nullable Output throttleCheckPeriod) {
$.throttleCheckPeriod = throttleCheckPeriod;
return this;
}
/**
* @param throttleCheckPeriod Defines how frequently Splunk software checks for index throttling condition, in seconds. Defaults to 15 (seconds).
*
* @return builder
*
*/
public Builder throttleCheckPeriod(Integer throttleCheckPeriod) {
return throttleCheckPeriod(Output.of(throttleCheckPeriod));
}
/**
* @param tstatsHomePath Location to store datamodel acceleration TSIDX data for this index. Restart splunkd after changing this parameter.
* If specified, it must be defined in terms of a volume definition.
*
* @return builder
*
*/
public Builder tstatsHomePath(@Nullable Output tstatsHomePath) {
$.tstatsHomePath = tstatsHomePath;
return this;
}
/**
* @param tstatsHomePath Location to store datamodel acceleration TSIDX data for this index. Restart splunkd after changing this parameter.
* If specified, it must be defined in terms of a volume definition.
*
* @return builder
*
*/
public Builder tstatsHomePath(String tstatsHomePath) {
return tstatsHomePath(Output.of(tstatsHomePath));
}
/**
* @param warmToColdScript Path to a script to run when moving data from warm to cold.
* This attribute is supported for backwards compatibility with Splunk software versions older than 4.0. Contact Splunk support if you need help configuring this setting.
*
* @return builder
*
*/
public Builder warmToColdScript(@Nullable Output warmToColdScript) {
$.warmToColdScript = warmToColdScript;
return this;
}
/**
* @param warmToColdScript Path to a script to run when moving data from warm to cold.
* This attribute is supported for backwards compatibility with Splunk software versions older than 4.0. Contact Splunk support if you need help configuring this setting.
*
* @return builder
*
*/
public Builder warmToColdScript(String warmToColdScript) {
return warmToColdScript(Output.of(warmToColdScript));
}
public IndexesArgs build() {
return $;
}
}
}