/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.fsx.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* The Lustre configuration for the file system being created.
*
*
*
* The following parameters are not supported for file systems with a data repository association created with
* CreateDataRepositoryAssociation.
*
* - AutoImportPolicy
*
* - ExportPath
*
* - ImportedFileChunkSize
*
* - ImportPath
*
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateFileSystemLustreConfiguration implements Serializable, Cloneable, StructuredPojo {
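// Illustrative usage sketch, not part of the generated model: it shows how this configuration is
// typically attached to a CreateFileSystemRequest and sent with an AmazonFSx client. The companion
// classes (CreateFileSystemRequest, AmazonFSx, AmazonFSxClientBuilder) and all literal values below
// are assumptions for the example; substitute your own capacity, throughput, and subnet.
//
//   AmazonFSx fsx = AmazonFSxClientBuilder.defaultClient();
//
//   CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//           .withDeploymentType(LustreDeploymentType.PERSISTENT_2)
//           .withPerUnitStorageThroughput(125)
//           .withWeeklyMaintenanceStartTime("1:05:00");   // Monday 05:00 UTC, in d:HH:MM form
//
//   CreateFileSystemRequest request = new CreateFileSystemRequest()
//           .withFileSystemType("LUSTRE")
//           .withStorageCapacity(1200)
//           .withSubnetIds("subnet-0123456789abcdef0")
//           .withLustreConfiguration(lustreConfig);
//
//   fsx.createFileSystem(request);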
/**
*
* (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where
* d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
*
*/
private String weeklyMaintenanceStartTime;
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
* to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*/
private String importPath;
/**
*
* (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported.
* The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to
* which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an
* ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
* s3://import-bucket/FSxLustre20181105T222312Z.
*
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you
* specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects
* to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
* custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx
* exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*/
private String exportPath;
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*/
private Integer importedFileChunkSize;
/**
*
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary
* storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit
* encryption of data and higher burst throughput capacity than SCRATCH_1.
*
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all
* Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the
* highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration
* mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is
* available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of
* Amazon Web Services Regions in which PERSISTENT_2 is available, see File
* system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the
* CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support
* automatic encryption in the Amazon Web Services Regions where they are available. For more information about
* encryption in transit for FSx for Lustre file systems, see Encrypting data in
* transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
*
*/
private String deploymentType;
/**
*
* (Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use
* this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify
* objects in your linked S3 bucket. AutoImportPolicy can have the following values:
*
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the
* linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or
* changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added
* to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any
* new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose
* this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any
* objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see
* Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*/
private String autoImportPolicy;
/**
*
* Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of
* read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system
* throughput capacity is calculated by multiplying file system storage capacity (TiB) by the
* PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of
* PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
*
* Valid values:
*
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*
*
*
*/
private Integer perUnitStorageThroughput;
private String dailyAutomaticBackupStartTime;
/**
*
* The number of days to retain automatic backups. Setting this property to 0 disables automatic
* backups. You can retain automatic backups for a maximum of 90 days. The default is 0.
*
*/
private Integer automaticBackupRetentionDays;
/**
*
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and
* user-initiated backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups
* is set to true and you specify one or more backup tags, only the specified tags are copied to backups. If you
* specify one or more tags when creating a user-initiated backup, no tags are copied from the file system,
* regardless of this value.
*
*
* (Default = false)
*
*
* For more information, see
* Working with backups in the Amazon FSx for Lustre User Guide.
*
*/
private Boolean copyTagsToBackups;
/**
*
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage
* devices. This parameter is required when storage type is HDD. Set this property to READ to improve
* the performance for frequently accessed files by caching up to 20% of the total storage capacity of the file
* system.
*
*
* This parameter is required when StorageType is set to HDD.
*
*/
private String driveCacheType;
/**
*
* Sets the data compression configuration for the file system. DataCompressionType can have the
* following values:
*
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
*
*
*
* For more information, see Lustre data compression in
* the Amazon FSx for Lustre User Guide.
*
*/
private String dataCompressionType;
/**
*
* The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon
* CloudWatch Logs.
*
*/
private LustreLogCreateConfiguration logConfiguration;
/**
*
* The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root
* squash restricts root-level access from clients that try to access your file system as a root user.
*
*/
private LustreRootSquashConfiguration rootSquashConfiguration;
/**
*
* The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a
* PERSISTENT_2 deployment type.
*
*/
private CreateFileSystemLustreMetadataConfiguration metadataConfiguration;
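// Illustrative sketch, not part of the generated model: wiring up the optional logging, root squash,
// and metadata sub-configurations declared above. The nested builder methods (withLevel,
// withDestination, withRootSquash, withMode) and the withLogConfiguration, withRootSquashConfiguration,
// and withMetadataConfiguration setters are assumed from the companion FSx model classes; check their
// javadoc for the exact API in your SDK version. The log-group ARN is a placeholder.
//
//   CreateFileSystemLustreConfiguration config = new CreateFileSystemLustreConfiguration()
//           .withLogConfiguration(new LustreLogCreateConfiguration()
//                   .withLevel("WARN_ERROR")
//                   .withDestination("arn:aws:logs:us-east-1:111122223333:log-group:/aws/fsx/my-fsx-logs"))
//           .withRootSquashConfiguration(new LustreRootSquashConfiguration()
//                   .withRootSquash("65534:65534"))
//           .withMetadataConfiguration(new CreateFileSystemLustreMetadataConfiguration()
//                   .withMode("AUTOMATIC"));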
/**
*
* (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where
* d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
*
*
* @param weeklyMaintenanceStartTime
* (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone,
* where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
*/
public void setWeeklyMaintenanceStartTime(String weeklyMaintenanceStartTime) {
this.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime;
}
/**
*
* (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where
* d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
*
*
* @return (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time
* zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
*/
public String getWeeklyMaintenanceStartTime() {
return this.weeklyMaintenanceStartTime;
}
/**
*
* (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where
* d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
*
*
* @param weeklyMaintenanceStartTime
* (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone,
* where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withWeeklyMaintenanceStartTime(String weeklyMaintenanceStartTime) {
setWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime);
return this;
}
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
* to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param importPath
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be
* mapped to the root of the Amazon S3 bucket you select. An example is
* s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name,
* only object keys with that prefix are loaded into the file system.
*
* This parameter is not supported for file systems with a data repository association.
*
*/
public void setImportPath(String importPath) {
this.importPath = importPath;
}
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
* to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @return (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will
* be mapped to the root of the Amazon S3 bucket you select. An example is
* s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name,
* only object keys with that prefix are loaded into the file system.
*
* This parameter is not supported for file systems with a data repository association.
*
*/
public String getImportPath() {
return this.importPath;
}
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
* to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param importPath
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be
* mapped to the root of the Amazon S3 bucket you select. An example is
* s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name,
* only object keys with that prefix are loaded into the file system.
*
* This parameter is not supported for file systems with a data repository association.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withImportPath(String importPath) {
setImportPath(importPath);
return this;
}
/**
*
* (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported.
* The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to
* which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an
* ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
* s3://import-bucket/FSxLustre20181105T222312Z.
*
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you
* specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects
* to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
* custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx
* exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param exportPath
* (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is
* exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an
* optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file
* system. If an ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
* s3://import-bucket/FSxLustre20181105T222312Z.
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If
* you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file
* system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on
* export. If you provide a custom prefix in the export path, such as
* s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file
* system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*/
public void setExportPath(String exportPath) {
this.exportPath = exportPath;
}
/**
*
* (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported.
* The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to
* which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an
* ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
* s3://import-bucket/FSxLustre20181105T222312Z.
*
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you
* specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects
* to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
* custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx
* exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @return (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is
* exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an
* optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file
* system. If an ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for
* example s3://import-bucket/FSxLustre20181105T222312Z.
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath.
* If you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file
* system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on
* export. If you provide a custom prefix in the export path, such as
* s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file
* system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*/
public String getExportPath() {
return this.exportPath;
}
/**
*
* (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported.
* The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to
* which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an
* ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
* s3://import-bucket/FSxLustre20181105T222312Z.
*
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you
* specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects
* to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
* custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx
* exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param exportPath
* (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is
* exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an
* optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file
* system. If an ExportPath value is not provided, Amazon FSx sets a default export path,
* s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
* s3://import-bucket/FSxLustre20181105T222312Z.
*
* The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If
* you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file
* system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on
* export. If you provide a custom prefix in the export path, such as
* s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file
* system to that export prefix in the Amazon S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withExportPath(String exportPath) {
setExportPath(exportPath);
return this;
}
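// Illustrative sketch, not part of the generated model: linking the file system to an S3 data
// repository with the ImportPath/ExportPath parameters documented above. The bucket and prefix are
// placeholders; both paths must use the same bucket, and these parameters do not apply to file
// systems that use a data repository association instead.
//
//   CreateFileSystemLustreConfiguration s3Linked = new CreateFileSystemLustreConfiguration()
//           .withImportPath("s3://import-bucket/optional-prefix")
//           .withExportPath("s3://import-bucket/optional-prefix");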
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param importedFileChunkSize
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum
* amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a
* single file can be striped across is limited by the total number of disks that make up the file
* system.
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects
* have a maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*/
public void setImportedFileChunkSize(Integer importedFileChunkSize) {
this.importedFileChunkSize = importedFileChunkSize;
}
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @return (Optional) For files imported from a data repository, this value determines the stripe count and maximum
* amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a
* single file can be striped across is limited by the total number of disks that make up the file
* system.
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3
* objects have a maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*/
public Integer getImportedFileChunkSize() {
return this.importedFileChunkSize;
}
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param importedFileChunkSize
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum
* amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a
* single file can be striped across is limited by the total number of disks that make up the file
* system.
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects
* have a maximum size of 5 TB.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withImportedFileChunkSize(Integer importedFileChunkSize) {
setImportedFileChunkSize(importedFileChunkSize);
return this;
}
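// Illustrative sketch, not part of the generated model: requesting a 2 GiB chunk size for files
// imported from S3. Per the description above, the value is in MiB, the default is 1,024 MiB, and
// the maximum is 512,000 MiB.
//
//   CreateFileSystemLustreConfiguration chunked = new CreateFileSystemLustreConfiguration()
//           .withImportedFileChunkSize(2048);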
/**
*
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary
* storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit
* encryption of data and higher burst throughput capacity than SCRATCH_1.
*
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all
* Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the
* highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration
* mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is
* available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of
* Amazon Web Services Regions in which PERSISTENT_2 is available, see File
* system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the
* CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support
* automatic encryption in the Amazon Web Services Regions where they are available. For more information about
* encryption in transit for FSx for Lustre file systems, see Encrypting data in
* transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
*
*
* @param deploymentType
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need
* temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides
* in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in
* all Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require
* the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata
* configuration mode for PERSISTENT_2 which supports increasing metadata performance.
* PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more
* information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is
* available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to
* 2.10, the CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that
* support automatic encryption in the Amazon Web Services Regions where they are available. For more
* information about encryption in transit for FSx for Lustre file systems, see Encrypting data
* in transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
* @see LustreDeploymentType
*/
public void setDeploymentType(String deploymentType) {
this.deploymentType = deploymentType;
}
/**
*
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary
* storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit
* encryption of data and higher burst throughput capacity than SCRATCH_1.
*
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all
* Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the
* highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration
* mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is
* available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of
* Amazon Web Services Regions in which PERSISTENT_2 is available, see File
* system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the
* CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support
* automatic encryption in the Amazon Web Services Regions where they are available. For more information about
* encryption in transit for FSx for Lustre file systems, see Encrypting data in
* transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
*
*
* @return (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need
* temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type
* provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in
* all Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require
* the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata
* configuration mode for PERSISTENT_2 which supports increasing metadata performance.
* PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more
* information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is
* available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to
* 2.10, the CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that
* support automatic encryption in the Amazon Web Services Regions where they are available. For more
* information about encryption in transit for FSx for Lustre file systems, see Encrypting data
* in transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
* @see LustreDeploymentType
*/
public String getDeploymentType() {
return this.deploymentType;
}
/**
*
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary
* storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit
* encryption of data and higher burst throughput capacity than SCRATCH_1.
*
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all
* Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the
* highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration
* mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is
* available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of
* Amazon Web Services Regions in which PERSISTENT_2 is available, see File
* system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the
* CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support
* automatic encryption in the Amazon Web Services Regions where they are available. For more information about
* encryption in transit for FSx for Lustre file systems, see Encrypting data in
* transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
*
*
* @param deploymentType
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need
* temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides
* in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in
* all Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require
* the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata
* configuration mode for PERSISTENT_2 which supports increasing metadata performance.
* PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more
* information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is
* available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to
* 2.10, the CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that
* support automatic encryption in the Amazon Web Services Regions where they are available. For more
* information about encryption in transit for FSx for Lustre file systems, see Encrypting data
* in transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
* @return Returns a reference to this object so that method calls can be chained together.
* @see LustreDeploymentType
*/
public CreateFileSystemLustreConfiguration withDeploymentType(String deploymentType) {
setDeploymentType(deploymentType);
return this;
}
/**
*
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary
* storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit
* encryption of data and higher burst throughput capacity than SCRATCH_1.
*
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all
* Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the
* highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration
* mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is
* available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of
* Amazon Web Services Regions in which PERSISTENT_2 is available, see File
* system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the
* CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support
* automatic encryption in the Amazon Web Services Regions where they are available. For more information about
* encryption in transit for FSx for Lustre file systems, see Encrypting data in
* transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
*
*
* @param deploymentType
* (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need
* temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides
* in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.
*
* Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t
* latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in
* all Amazon Web Services Regions in which FSx for Lustre is available.
*
*
* Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require
* the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
* PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata
* configuration mode for PERSISTENT_2 which supports increasing metadata performance.
* PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more
* information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is
* available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.
*
*
*
* If you choose PERSISTENT_2, and you set FileSystemTypeVersion to
* 2.10, the CreateFileSystem operation fails.
*
*
*
* Encryption of data in transit is automatically turned on when you access SCRATCH_2,
* PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that
* support automatic encryption in the Amazon Web Services Regions where they are available. For more
* information about encryption in transit for FSx for Lustre file systems, see Encrypting data
* in transit in the Amazon FSx for Lustre User Guide.
*
*
* (Default = SCRATCH_1)
* @return Returns a reference to this object so that method calls can be chained together.
* @see LustreDeploymentType
*/
public CreateFileSystemLustreConfiguration withDeploymentType(LustreDeploymentType deploymentType) {
this.deploymentType = deploymentType.toString();
return this;
}
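// Illustrative sketch, not part of the generated model: selecting PERSISTENT_2 with the typed enum
// overload above. Because PERSISTENT_2 fails with FileSystemTypeVersion 2.10, the request pins a
// newer Lustre version; withFileSystemTypeVersion on CreateFileSystemRequest and the "2.12" value
// are assumptions for the example.
//
//   CreateFileSystemLustreConfiguration persistent2 = new CreateFileSystemLustreConfiguration()
//           .withDeploymentType(LustreDeploymentType.PERSISTENT_2)
//           .withPerUnitStorageThroughput(250);
//
//   CreateFileSystemRequest request = new CreateFileSystemRequest()
//           .withFileSystemType("LUSTRE")
//           .withFileSystemTypeVersion("2.12")
//           .withStorageCapacity(2400)
//           .withSubnetIds("subnet-0123456789abcdef0")
//           .withLustreConfiguration(persistent2);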
/**
*
* (Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use
* this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify
* objects in your linked S3 bucket. AutoImportPolicy can have the following values:
*
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the
* linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or
* changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added
* to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any
* new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose
* this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any
* objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see
* Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param autoImportPolicy
* (Optional) When you create your file system, your existing S3 objects appear as file and directory
* listings. Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date as
* you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following
* values:
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from
* the linked S3 bucket when the file system is created. FSx does not update file and directory listings for
* any new or changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new
* objects added to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after
* you choose this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory
* listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3
* bucket, and any objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
* @see AutoImportPolicyType
*/
public void setAutoImportPolicy(String autoImportPolicy) {
this.autoImportPolicy = autoImportPolicy;
}
/**
*
* (Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use
* this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify
* objects in your linked S3 bucket. AutoImportPolicy can have the following values:
*
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the
* linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or
* changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added
* to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any
* new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose
* this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any
* objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see
* Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @return (Optional) When you create your file system, your existing S3 objects appear as file and directory
* listings. Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date
* as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the
* following values:
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from
* the linked S3 bucket when the file system is created. FSx does not update file and directory listings for
* any new or changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new
* objects added to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket
* after you choose this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory
* listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3
* bucket, and any objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
* @see AutoImportPolicyType
*/
public String getAutoImportPolicy() {
return this.autoImportPolicy;
}
/**
*
* (Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use
* this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify
* objects in your linked S3 bucket. AutoImportPolicy can have the following values:
*
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the
* linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or
* changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added
* to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any
* new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose
* this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any
* objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see
* Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param autoImportPolicy
* (Optional) When you create your file system, your existing S3 objects appear as file and directory
* listings. Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date as
* you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following
* values:
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from
* the linked S3 bucket when the file system is created. FSx does not update file and directory listings for
* any new or changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new
* objects added to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after
* you choose this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory
* listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3
* bucket, and any objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see AutoImportPolicyType
*/
public CreateFileSystemLustreConfiguration withAutoImportPolicy(String autoImportPolicy) {
setAutoImportPolicy(autoImportPolicy);
return this;
}
/**
*
* (Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use
* this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify
* objects in your linked S3 bucket. AutoImportPolicy can have the following values:
*
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the
* linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or
* changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added
* to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any
* new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose
* this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any
* objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see
* Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
*
*
* @param autoImportPolicy
* (Optional) When you create your file system, your existing S3 objects appear as file and directory
* listings. Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date as
* you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following
* values:
*
* - NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from
* the linked S3 bucket when the file system is created. FSx does not update file and directory listings for
* any new or changed objects after choosing this option.
*
*
* - NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new
* objects added to the linked S3 bucket that do not currently exist in the FSx file system.
*
*
* - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings
* of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after
* you choose this option.
*
*
* - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory
* listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3
* bucket, and any objects that were deleted in the S3 bucket.
*
*
*
*
* For more information, see Automatically import updates from your S3 bucket.
*
*
*
* This parameter is not supported for file systems with a data repository association.
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see AutoImportPolicyType
*/
public CreateFileSystemLustreConfiguration withAutoImportPolicy(AutoImportPolicyType autoImportPolicy) {
this.autoImportPolicy = autoImportPolicy.toString();
return this;
}
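// Illustrative sketch, not part of the generated model: keeping file and directory listings in sync
// with the linked bucket by combining an import path with the typed enum overload above. The bucket
// name is a placeholder.
//
//   CreateFileSystemLustreConfiguration autoImport = new CreateFileSystemLustreConfiguration()
//           .withImportPath("s3://import-bucket")
//           .withAutoImportPolicy(AutoImportPolicyType.NEW_CHANGED_DELETED);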
/**
*
* Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of
* read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system
* throughput capacity is calculated by multiplying file system storage capacity (TiB) by the
* PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of
* PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
*
* Valid values:
*
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*
*
*
*
* @param perUnitStorageThroughput
* Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the
* amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in
* MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB)
* by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50
* MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for
* the amount of throughput that you provision.
*
* Valid values:
*
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*
*
*/
public void setPerUnitStorageThroughput(Integer perUnitStorageThroughput) {
this.perUnitStorageThroughput = perUnitStorageThroughput;
}
/**
*
* Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write
* throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput
* capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput
* (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s
* of file system throughput. You pay for the amount of throughput that you provision.
*
* Valid values:
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*
* @return Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and
* write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system
* throughput capacity is calculated by multiplying file system storage capacity (TiB) by the
* PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of
* PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
* Valid values:
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*/
public Integer getPerUnitStorageThroughput() {
return this.perUnitStorageThroughput;
}
/**
*
* Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write
* throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput
* capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput
* (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s
* of file system throughput. You pay for the amount of throughput that you provision.
*
* Valid values:
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*
* @param perUnitStorageThroughput
* Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write
* throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system
* throughput capacity is calculated by multiplying file system storage capacity (TiB) by the
* PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of
* PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
* Valid values:
*
* - For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
*
* - For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
*
* - For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withPerUnitStorageThroughput(Integer perUnitStorageThroughput) {
setPerUnitStorageThroughput(perUnitStorageThroughput);
return this;
}
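// Usage sketch (illustrative, not part of the generated model): for a hypothetical 2.4-TiB PERSISTENT_2
// file system, provisioning 125 MB/s/TiB yields 2.4 x 125 = 300 MB/s of aggregate throughput. The String
// deployment-type value mirrors the name documented above.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withDeploymentType("PERSISTENT_2")
//             .withPerUnitStorageThroughput(125);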
/**
* @param dailyAutomaticBackupStartTime
* A recurring daily time, in the format HH:MM, at which the daily automatic backup is taken. HH is the
* zero-padded hour of the day (0-23) and MM is the zero-padded minute of the hour.
*/
public void setDailyAutomaticBackupStartTime(String dailyAutomaticBackupStartTime) {
this.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime;
}
/**
* @return A recurring daily time, in the format HH:MM, at which the daily automatic backup is taken.
*/
public String getDailyAutomaticBackupStartTime() {
return this.dailyAutomaticBackupStartTime;
}
/**
* @param dailyAutomaticBackupStartTime
* A recurring daily time, in the format HH:MM, at which the daily automatic backup is taken.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withDailyAutomaticBackupStartTime(String dailyAutomaticBackupStartTime) {
setDailyAutomaticBackupStartTime(dailyAutomaticBackupStartTime);
return this;
}
/**
*
* The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You
* can retain automatic backups for a maximum of 90 days. The default is 0.
*
* @param automaticBackupRetentionDays
* The number of days to retain automatic backups. Setting this property to 0 disables automatic backups.
* You can retain automatic backups for a maximum of 90 days. The default is 0.
*/
public void setAutomaticBackupRetentionDays(Integer automaticBackupRetentionDays) {
this.automaticBackupRetentionDays = automaticBackupRetentionDays;
}
/**
*
* The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You
* can retain automatic backups for a maximum of 90 days. The default is 0.
*
* @return The number of days to retain automatic backups. Setting this property to 0 disables automatic
* backups. You can retain automatic backups for a maximum of 90 days. The default is 0.
*/
public Integer getAutomaticBackupRetentionDays() {
return this.automaticBackupRetentionDays;
}
/**
*
* The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You
* can retain automatic backups for a maximum of 90 days. The default is 0.
*
* @param automaticBackupRetentionDays
* The number of days to retain automatic backups. Setting this property to 0 disables automatic backups.
* You can retain automatic backups for a maximum of 90 days. The default is 0.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withAutomaticBackupRetentionDays(Integer automaticBackupRetentionDays) {
setAutomaticBackupRetentionDays(automaticBackupRetentionDays);
return this;
}
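// Usage sketch (illustrative, not part of the generated model): retaining a week of automatic backups. The
// "05:00" daily start time assumes the HH:MM format used for backup start times elsewhere in the FSx API.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withAutomaticBackupRetentionDays(7)
//             .withDailyAutomaticBackupStartTime("05:00");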
/**
*
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you
* specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more
* tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this
* value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*
* @param copyTagsToBackups
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and
* you specify one or more backup tags, only the specified tags are copied to backups. If you specify one
* or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless
* of this value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*/
public void setCopyTagsToBackups(Boolean copyTagsToBackups) {
this.copyTagsToBackups = copyTagsToBackups;
}
/**
*
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you
* specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more
* tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this
* value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*
* @return (Optional) Not available for use with file systems that are linked to a data repository. A boolean
* flag indicating whether tags for the file system should be copied to backups. The default value is false.
* If CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and
* you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or
* more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of
* this value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*/
public Boolean getCopyTagsToBackups() {
return this.copyTagsToBackups;
}
/**
*
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you
* specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more
* tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this
* value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*
* @param copyTagsToBackups
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and
* you specify one or more backup tags, only the specified tags are copied to backups. If you specify one
* or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless
* of this value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withCopyTagsToBackups(Boolean copyTagsToBackups) {
setCopyTagsToBackups(copyTagsToBackups);
return this;
}
/**
*
* (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag
* indicating whether tags for the file system should be copied to backups. The default value is false. If
* CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you
* specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more
* tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this
* value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*
* @return (Optional) Not available for use with file systems that are linked to a data repository. A boolean
* flag indicating whether tags for the file system should be copied to backups. The default value is false.
* If CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated
* backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and
* you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or
* more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of
* this value.
*
* (Default = false)
*
* For more information, see Working with backups in the Amazon FSx for Lustre User Guide.
*/
public Boolean isCopyTagsToBackups() {
return this.copyTagsToBackups;
}
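// Usage sketch (illustrative, not part of the generated model): overriding the false default so that file
// system tags are copied to automatic and user-initiated backups. As documented above, this option is not
// available for file systems linked to a data repository.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withCopyTagsToBackups(true);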
/**
*
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the performance
* for frequently accessed files by caching up to 20% of the total storage capacity of the file system.
*
* This parameter is required when StorageType is set to HDD.
*
* @param driveCacheType
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the
* performance for frequently accessed files by caching up to 20% of the total storage capacity of the
* file system.
*
* This parameter is required when StorageType is set to HDD.
* @see DriveCacheType
*/
public void setDriveCacheType(String driveCacheType) {
this.driveCacheType = driveCacheType;
}
/**
*
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the performance
* for frequently accessed files by caching up to 20% of the total storage capacity of the file system.
*
* This parameter is required when StorageType is set to HDD.
*
* @return The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage
* devices. This parameter is required when storage type is HDD. Set this property to READ to improve the
* performance for frequently accessed files by caching up to 20% of the total storage capacity of the file
* system.
*
* This parameter is required when StorageType is set to HDD.
* @see DriveCacheType
*/
public String getDriveCacheType() {
return this.driveCacheType;
}
/**
*
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the performance
* for frequently accessed files by caching up to 20% of the total storage capacity of the file system.
*
* This parameter is required when StorageType is set to HDD.
*
* @param driveCacheType
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the
* performance for frequently accessed files by caching up to 20% of the total storage capacity of the
* file system.
*
* This parameter is required when StorageType is set to HDD.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DriveCacheType
*/
public CreateFileSystemLustreConfiguration withDriveCacheType(String driveCacheType) {
setDriveCacheType(driveCacheType);
return this;
}
/**
*
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the performance
* for frequently accessed files by caching up to 20% of the total storage capacity of the file system.
*
* This parameter is required when StorageType is set to HDD.
*
* @param driveCacheType
* The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices.
* This parameter is required when storage type is HDD. Set this property to READ to improve the
* performance for frequently accessed files by caching up to 20% of the total storage capacity of the
* file system.
*
* This parameter is required when StorageType is set to HDD.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DriveCacheType
*/
public CreateFileSystemLustreConfiguration withDriveCacheType(DriveCacheType driveCacheType) {
this.driveCacheType = driveCacheType.toString();
return this;
}
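// Usage sketch (illustrative, not part of the generated model): enabling the read cache for a PERSISTENT_1
// HDD configuration. The StorageType=HDD requirement noted above is set on the enclosing
// CreateFileSystemRequest, not on this object; 12 MB/s/TiB is one of the documented HDD throughput values.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withDeploymentType("PERSISTENT_1")
//             .withPerUnitStorageThroughput(12)
//             .withDriveCacheType(DriveCacheType.READ);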
/**
*
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
*
* @param dataCompressionType
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
* @see DataCompressionType
*/
public void setDataCompressionType(String dataCompressionType) {
this.dataCompressionType = dataCompressionType;
}
/**
*
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
*
* @return Sets the data compression configuration for the file system. DataCompressionType can have the
* following values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
* @see DataCompressionType
*/
public String getDataCompressionType() {
return this.dataCompressionType;
}
/**
*
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
*
* @param dataCompressionType
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataCompressionType
*/
public CreateFileSystemLustreConfiguration withDataCompressionType(String dataCompressionType) {
setDataCompressionType(dataCompressionType);
return this;
}
/**
*
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
*
* @param dataCompressionType
* Sets the data compression configuration for the file system. DataCompressionType can have the following
* values:
*
* - NONE - (Default) Data compression is turned off when the file system is created.
*
* - LZ4 - Data compression is turned on with the LZ4 algorithm.
*
* For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide.
* @return Returns a reference to this object so that method calls can be chained together.
* @see DataCompressionType
*/
public CreateFileSystemLustreConfiguration withDataCompressionType(DataCompressionType dataCompressionType) {
this.dataCompressionType = dataCompressionType.toString();
return this;
}
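// Usage sketch (illustrative, not part of the generated model): turning on LZ4 compression at creation time
// using the enum overload above.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withDataCompressionType(DataCompressionType.LZ4);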
/**
*
* The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon
* CloudWatch Logs.
*
*
* @param logConfiguration
* The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to
* Amazon CloudWatch Logs.
*/
public void setLogConfiguration(LustreLogCreateConfiguration logConfiguration) {
this.logConfiguration = logConfiguration;
}
/**
*
* The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon
* CloudWatch Logs.
*
*
* @return The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to
* Amazon CloudWatch Logs.
*/
public LustreLogCreateConfiguration getLogConfiguration() {
return this.logConfiguration;
}
/**
*
* The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon
* CloudWatch Logs.
*
*
* @param logConfiguration
* The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is
* enabled, Lustre logs error and warning events for data repositories associated with your file system to
* Amazon CloudWatch Logs.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withLogConfiguration(LustreLogCreateConfiguration logConfiguration) {
setLogConfiguration(logConfiguration);
return this;
}
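// Usage sketch (illustrative, not part of the generated model): sending Lustre warning and error events to
// CloudWatch Logs. The withLevel/withDestination builders and the "WARN_ERROR" level are assumptions drawn
// from the FSx log-configuration API, and the log-group ARN is hypothetical.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withLogConfiguration(new LustreLogCreateConfiguration()
//                     .withLevel("WARN_ERROR")
//                     .withDestination("arn:aws:logs:us-east-1:111122223333:log-group:/example/fsx-lustre"));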
/**
*
* The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root
* squash restricts root-level access from clients that try to access your file system as a root user.
*
*
* @param rootSquashConfiguration
* The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When
* enabled, root squash restricts root-level access from clients that try to access your file system as a
* root user.
*/
public void setRootSquashConfiguration(LustreRootSquashConfiguration rootSquashConfiguration) {
this.rootSquashConfiguration = rootSquashConfiguration;
}
/**
*
* The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root
* squash restricts root-level access from clients that try to access your file system as a root user.
*
*
* @return The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When
* enabled, root squash restricts root-level access from clients that try to access your file system as a
* root user.
*/
public LustreRootSquashConfiguration getRootSquashConfiguration() {
return this.rootSquashConfiguration;
}
/**
*
* The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root
* squash restricts root-level access from clients that try to access your file system as a root user.
*
*
* @param rootSquashConfiguration
* The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When
* enabled, root squash restricts root-level access from clients that try to access your file system as a
* root user.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withRootSquashConfiguration(LustreRootSquashConfiguration rootSquashConfiguration) {
setRootSquashConfiguration(rootSquashConfiguration);
return this;
}
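// Usage sketch (illustrative, not part of the generated model): squashing root users on clients to a
// non-privileged UID:GID. The withRootSquash builder and its "uid:gid" string format are assumptions drawn
// from the FSx LustreRootSquashConfiguration API.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withRootSquashConfiguration(new LustreRootSquashConfiguration()
//                     .withRootSquash("65534:65534"));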
/**
*
* The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a
* PERSISTENT_2 deployment type.
*
* @param metadataConfiguration
* The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a
* PERSISTENT_2 deployment type.
*/
public void setMetadataConfiguration(CreateFileSystemLustreMetadataConfiguration metadataConfiguration) {
this.metadataConfiguration = metadataConfiguration;
}
/**
*
* The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a
* PERSISTENT_2 deployment type.
*
* @return The Lustre metadata performance configuration for the creation of an FSx for Lustre file system
* using a PERSISTENT_2 deployment type.
*/
public CreateFileSystemLustreMetadataConfiguration getMetadataConfiguration() {
return this.metadataConfiguration;
}
/**
*
* The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a
* PERSISTENT_2 deployment type.
*
* @param metadataConfiguration
* The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a
* PERSISTENT_2 deployment type.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withMetadataConfiguration(CreateFileSystemLustreMetadataConfiguration metadataConfiguration) {
setMetadataConfiguration(metadataConfiguration);
return this;
}
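// Usage sketch (illustrative, not part of the generated model): provisioning metadata IOPS for a
// PERSISTENT_2 file system. The withMode/withIops builders and the "USER_PROVISIONED" mode are assumptions
// drawn from the FSx metadata-configuration API.
//
//     CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
//             .withDeploymentType("PERSISTENT_2")
//             .withMetadataConfiguration(new CreateFileSystemLustreMetadataConfiguration()
//                     .withMode("USER_PROVISIONED")
//                     .withIops(6000));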
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getWeeklyMaintenanceStartTime() != null)
sb.append("WeeklyMaintenanceStartTime: ").append(getWeeklyMaintenanceStartTime()).append(",");
if (getImportPath() != null)
sb.append("ImportPath: ").append(getImportPath()).append(",");
if (getExportPath() != null)
sb.append("ExportPath: ").append(getExportPath()).append(",");
if (getImportedFileChunkSize() != null)
sb.append("ImportedFileChunkSize: ").append(getImportedFileChunkSize()).append(",");
if (getDeploymentType() != null)
sb.append("DeploymentType: ").append(getDeploymentType()).append(",");
if (getAutoImportPolicy() != null)
sb.append("AutoImportPolicy: ").append(getAutoImportPolicy()).append(",");
if (getPerUnitStorageThroughput() != null)
sb.append("PerUnitStorageThroughput: ").append(getPerUnitStorageThroughput()).append(",");
if (getDailyAutomaticBackupStartTime() != null)
sb.append("DailyAutomaticBackupStartTime: ").append(getDailyAutomaticBackupStartTime()).append(",");
if (getAutomaticBackupRetentionDays() != null)
sb.append("AutomaticBackupRetentionDays: ").append(getAutomaticBackupRetentionDays()).append(",");
if (getCopyTagsToBackups() != null)
sb.append("CopyTagsToBackups: ").append(getCopyTagsToBackups()).append(",");
if (getDriveCacheType() != null)
sb.append("DriveCacheType: ").append(getDriveCacheType()).append(",");
if (getDataCompressionType() != null)
sb.append("DataCompressionType: ").append(getDataCompressionType()).append(",");
if (getLogConfiguration() != null)
sb.append("LogConfiguration: ").append(getLogConfiguration()).append(",");
if (getRootSquashConfiguration() != null)
sb.append("RootSquashConfiguration: ").append(getRootSquashConfiguration()).append(",");
if (getMetadataConfiguration() != null)
sb.append("MetadataConfiguration: ").append(getMetadataConfiguration());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CreateFileSystemLustreConfiguration == false)
return false;
CreateFileSystemLustreConfiguration other = (CreateFileSystemLustreConfiguration) obj;
if (other.getWeeklyMaintenanceStartTime() == null ^ this.getWeeklyMaintenanceStartTime() == null)
return false;
if (other.getWeeklyMaintenanceStartTime() != null && other.getWeeklyMaintenanceStartTime().equals(this.getWeeklyMaintenanceStartTime()) == false)
return false;
if (other.getImportPath() == null ^ this.getImportPath() == null)
return false;
if (other.getImportPath() != null && other.getImportPath().equals(this.getImportPath()) == false)
return false;
if (other.getExportPath() == null ^ this.getExportPath() == null)
return false;
if (other.getExportPath() != null && other.getExportPath().equals(this.getExportPath()) == false)
return false;
if (other.getImportedFileChunkSize() == null ^ this.getImportedFileChunkSize() == null)
return false;
if (other.getImportedFileChunkSize() != null && other.getImportedFileChunkSize().equals(this.getImportedFileChunkSize()) == false)
return false;
if (other.getDeploymentType() == null ^ this.getDeploymentType() == null)
return false;
if (other.getDeploymentType() != null && other.getDeploymentType().equals(this.getDeploymentType()) == false)
return false;
if (other.getAutoImportPolicy() == null ^ this.getAutoImportPolicy() == null)
return false;
if (other.getAutoImportPolicy() != null && other.getAutoImportPolicy().equals(this.getAutoImportPolicy()) == false)
return false;
if (other.getPerUnitStorageThroughput() == null ^ this.getPerUnitStorageThroughput() == null)
return false;
if (other.getPerUnitStorageThroughput() != null && other.getPerUnitStorageThroughput().equals(this.getPerUnitStorageThroughput()) == false)
return false;
if (other.getDailyAutomaticBackupStartTime() == null ^ this.getDailyAutomaticBackupStartTime() == null)
return false;
if (other.getDailyAutomaticBackupStartTime() != null
&& other.getDailyAutomaticBackupStartTime().equals(this.getDailyAutomaticBackupStartTime()) == false)
return false;
if (other.getAutomaticBackupRetentionDays() == null ^ this.getAutomaticBackupRetentionDays() == null)
return false;
if (other.getAutomaticBackupRetentionDays() != null && other.getAutomaticBackupRetentionDays().equals(this.getAutomaticBackupRetentionDays()) == false)
return false;
if (other.getCopyTagsToBackups() == null ^ this.getCopyTagsToBackups() == null)
return false;
if (other.getCopyTagsToBackups() != null && other.getCopyTagsToBackups().equals(this.getCopyTagsToBackups()) == false)
return false;
if (other.getDriveCacheType() == null ^ this.getDriveCacheType() == null)
return false;
if (other.getDriveCacheType() != null && other.getDriveCacheType().equals(this.getDriveCacheType()) == false)
return false;
if (other.getDataCompressionType() == null ^ this.getDataCompressionType() == null)
return false;
if (other.getDataCompressionType() != null && other.getDataCompressionType().equals(this.getDataCompressionType()) == false)
return false;
if (other.getLogConfiguration() == null ^ this.getLogConfiguration() == null)
return false;
if (other.getLogConfiguration() != null && other.getLogConfiguration().equals(this.getLogConfiguration()) == false)
return false;
if (other.getRootSquashConfiguration() == null ^ this.getRootSquashConfiguration() == null)
return false;
if (other.getRootSquashConfiguration() != null && other.getRootSquashConfiguration().equals(this.getRootSquashConfiguration()) == false)
return false;
if (other.getMetadataConfiguration() == null ^ this.getMetadataConfiguration() == null)
return false;
if (other.getMetadataConfiguration() != null && other.getMetadataConfiguration().equals(this.getMetadataConfiguration()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getWeeklyMaintenanceStartTime() == null) ? 0 : getWeeklyMaintenanceStartTime().hashCode());
hashCode = prime * hashCode + ((getImportPath() == null) ? 0 : getImportPath().hashCode());
hashCode = prime * hashCode + ((getExportPath() == null) ? 0 : getExportPath().hashCode());
hashCode = prime * hashCode + ((getImportedFileChunkSize() == null) ? 0 : getImportedFileChunkSize().hashCode());
hashCode = prime * hashCode + ((getDeploymentType() == null) ? 0 : getDeploymentType().hashCode());
hashCode = prime * hashCode + ((getAutoImportPolicy() == null) ? 0 : getAutoImportPolicy().hashCode());
hashCode = prime * hashCode + ((getPerUnitStorageThroughput() == null) ? 0 : getPerUnitStorageThroughput().hashCode());
hashCode = prime * hashCode + ((getDailyAutomaticBackupStartTime() == null) ? 0 : getDailyAutomaticBackupStartTime().hashCode());
hashCode = prime * hashCode + ((getAutomaticBackupRetentionDays() == null) ? 0 : getAutomaticBackupRetentionDays().hashCode());
hashCode = prime * hashCode + ((getCopyTagsToBackups() == null) ? 0 : getCopyTagsToBackups().hashCode());
hashCode = prime * hashCode + ((getDriveCacheType() == null) ? 0 : getDriveCacheType().hashCode());
hashCode = prime * hashCode + ((getDataCompressionType() == null) ? 0 : getDataCompressionType().hashCode());
hashCode = prime * hashCode + ((getLogConfiguration() == null) ? 0 : getLogConfiguration().hashCode());
hashCode = prime * hashCode + ((getRootSquashConfiguration() == null) ? 0 : getRootSquashConfiguration().hashCode());
hashCode = prime * hashCode + ((getMetadataConfiguration() == null) ? 0 : getMetadataConfiguration().hashCode());
return hashCode;
}
@Override
public CreateFileSystemLustreConfiguration clone() {
try {
return (CreateFileSystemLustreConfiguration) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.fsx.model.transform.CreateFileSystemLustreConfigurationMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}