/*
* Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.fsx.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
*
* The Lustre configuration for the file system being created.
*
*
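 * A minimal usage sketch (illustrative only, not part of the generated source); the maintenance window, bucket
 * names, and throughput value below are hypothetical:
 *
 * <pre>{@code
 * CreateFileSystemLustreConfiguration lustreConfig = new CreateFileSystemLustreConfiguration()
 *         .withWeeklyMaintenanceStartTime("1:05:00")                  // assumed "d:HH:MM" format, Monday 05:00 UTC
 *         .withImportPath("s3://example-import-bucket/input")         // hypothetical bucket and prefix
 *         .withExportPath("s3://example-import-bucket/output")
 *         .withImportedFileChunkSize(1024)                            // default chunk size, in MiB
 *         .withDeploymentType(LustreDeploymentType.PERSISTENT_1)
 *         .withPerUnitStorageThroughput(50);                          // MB/s/TiB, PERSISTENT_1 only
 * }</pre>
 *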
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateFileSystemLustreConfiguration implements Serializable, Cloneable, StructuredPojo {
/**
*
* The preferred time to perform weekly maintenance, in the UTC time zone.
*
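 * For example, on a configuration instance (the "d:HH:MM" day-of-week/time format shown here is an assumption,
 * not taken from this file):
 *
 * <pre>{@code
 * lustreConfig.setWeeklyMaintenanceStartTime("1:05:00"); // illustrative: Mondays at 05:00 UTC
 * }</pre>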
*/
private String weeklyMaintenanceStartTime;
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
 * to the root of the Amazon S3 bucket you select. An example is <code>s3://import-bucket/optional-prefix</code>. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
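 * For illustration, on a configuration instance (bucket and prefix are hypothetical):
 *
 * <pre>{@code
 * // Only object keys beginning with the "input" prefix are loaded into the file system.
 * lustreConfig.setImportPath("s3://example-import-bucket/input");
 * }</pre>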
*/
private String importPath;
/**
*
 * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the
 * same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed
 * data is to be exported from your Amazon FSx for Lustre file system. If an <code>ExportPath</code> value is not
 * provided, Amazon FSx sets a default export path, <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>.
 * The timestamp is in UTC format, for example <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If you
 * only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file system objects
 * to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
 * custom prefix in the export path, such as <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx
 * exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
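 * For illustration, on a configuration instance (bucket and prefixes are hypothetical):
 *
 * <pre>{@code
 * // Export new and changed data under a custom prefix in the same bucket used for ImportPath.
 * lustreConfig.setExportPath("s3://example-import-bucket/output");
 * // If no export path is set, FSx defaults to s3://example-import-bucket/FSxLustre[creation-timestamp].
 * }</pre>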
*/
private String exportPath;
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
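 * As an illustrative sketch of the striping behavior described above (values are hypothetical):
 *
 * <pre>{@code
 * // With a 1,024 MiB chunk size, an imported 10 GiB file is spread across up to ten disks,
 * // bounded by the number of disks that make up the file system.
 * lustreConfig.setImportedFileChunkSize(1024);
 * }</pre>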
*/
private Integer importedFileChunkSize;
/**
*
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
*
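 * For illustration, selecting a deployment type with the typed overload (values chosen arbitrarily):
 *
 * <pre>{@code
 * // Scratch file system for short-lived processing...
 * lustreConfig.withDeploymentType(LustreDeploymentType.SCRATCH_2);
 * // ...or persistent storage, which also requires PerUnitStorageThroughput.
 * lustreConfig.withDeploymentType(LustreDeploymentType.PERSISTENT_1).withPerUnitStorageThroughput(50);
 * }</pre>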
*/
private String deploymentType;
/**
*
 * Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write throughput for
* each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system
* storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, provisioning 50
* MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
*
* Valid values are 50, 100, 200.
*
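 * A rough sizing sketch based on the description above (the 2.4 TiB figure comes from that example):
 *
 * <pre>{@code
 * // Per the documentation above, a 2.4 TiB file system provisioned at 50 MB/s/TiB
 * // yields about 117 MB/s of aggregate file system throughput.
 * lustreConfig.setPerUnitStorageThroughput(50);
 * }</pre>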
*/
private Integer perUnitStorageThroughput;
/**
*
* The preferred time to perform weekly maintenance, in the UTC time zone.
*
*
* @param weeklyMaintenanceStartTime
* The preferred time to perform weekly maintenance, in the UTC time zone.
*/
public void setWeeklyMaintenanceStartTime(String weeklyMaintenanceStartTime) {
this.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime;
}
/**
*
* The preferred time to perform weekly maintenance, in the UTC time zone.
*
*
* @return The preferred time to perform weekly maintenance, in the UTC time zone.
*/
public String getWeeklyMaintenanceStartTime() {
return this.weeklyMaintenanceStartTime;
}
/**
*
* The preferred time to perform weekly maintenance, in the UTC time zone.
*
*
* @param weeklyMaintenanceStartTime
* The preferred time to perform weekly maintenance, in the UTC time zone.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withWeeklyMaintenanceStartTime(String weeklyMaintenanceStartTime) {
setWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime);
return this;
}
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
 * to the root of the Amazon S3 bucket you select. An example is <code>s3://import-bucket/optional-prefix</code>. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
* @param importPath
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be
 * mapped to the root of the Amazon S3 bucket you select. An example is
 * <code>s3://import-bucket/optional-prefix</code>. If you specify a prefix after the Amazon S3 bucket name,
* only object keys with that prefix are loaded into the file system.
*/
public void setImportPath(String importPath) {
this.importPath = importPath;
}
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
 * to the root of the Amazon S3 bucket you select. An example is <code>s3://import-bucket/optional-prefix</code>. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
* @return (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will
 * be mapped to the root of the Amazon S3 bucket you select. An example is
 * <code>s3://import-bucket/optional-prefix</code>. If you specify a prefix after the Amazon S3 bucket name,
* only object keys with that prefix are loaded into the file system.
*/
public String getImportPath() {
return this.importPath;
}
/**
*
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped
 * to the root of the Amazon S3 bucket you select. An example is <code>s3://import-bucket/optional-prefix</code>. If
* you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file
* system.
*
*
* @param importPath
* (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data
* repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be
 * mapped to the root of the Amazon S3 bucket you select. An example is
 * <code>s3://import-bucket/optional-prefix</code>. If you specify a prefix after the Amazon S3 bucket name,
* only object keys with that prefix are loaded into the file system.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withImportPath(String importPath) {
setImportPath(importPath);
return this;
}
/**
*
 * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the
 * same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed
 * data is to be exported from your Amazon FSx for Lustre file system. If an <code>ExportPath</code> value is not
 * provided, Amazon FSx sets a default export path, <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>.
 * The timestamp is in UTC format, for example <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If you
 * only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file system objects
 * to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
 * custom prefix in the export path, such as <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx
 * exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
* @param exportPath
 * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must
 * use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new
 * and changed data is to be exported from your Amazon FSx for Lustre file system. If an
 * <code>ExportPath</code> value is not provided, Amazon FSx sets a default export path,
 * <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>. The timestamp is in UTC format, for example
 * <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If
 * you only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file
 * system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on
 * export. If you provide a custom prefix in the export path, such as
 * <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx exports the contents of your file
 * system to that export prefix in the Amazon S3 bucket.
*/
public void setExportPath(String exportPath) {
this.exportPath = exportPath;
}
/**
*
 * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the
 * same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed
 * data is to be exported from your Amazon FSx for Lustre file system. If an <code>ExportPath</code> value is not
 * provided, Amazon FSx sets a default export path, <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>.
 * The timestamp is in UTC format, for example <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If you
 * only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file system objects
 * to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
 * custom prefix in the export path, such as <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx
 * exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
 * @return (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must
 * use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new
 * and changed data is to be exported from your Amazon FSx for Lustre file system. If an
 * <code>ExportPath</code> value is not provided, Amazon FSx sets a default export path,
 * <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>. The timestamp is in UTC format, for example
 * <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If
 * you only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file
 * system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on
 * export. If you provide a custom prefix in the export path, such as
 * <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx exports the contents of your file
 * system to that export prefix in the Amazon S3 bucket.
*/
public String getExportPath() {
return this.exportPath;
}
/**
*
 * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the
 * same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed
 * data is to be exported from your Amazon FSx for Lustre file system. If an <code>ExportPath</code> value is not
 * provided, Amazon FSx sets a default export path, <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>.
 * The timestamp is in UTC format, for example <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If you
 * only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file system objects
 * to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a
 * custom prefix in the export path, such as <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx
 * exports the contents of your file system to that export prefix in the Amazon S3 bucket.
*
*
* @param exportPath
 * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must
 * use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new
 * and changed data is to be exported from your Amazon FSx for Lustre file system. If an
 * <code>ExportPath</code> value is not provided, Amazon FSx sets a default export path,
 * <code>s3://import-bucket/FSxLustre[creation-timestamp]</code>. The timestamp is in UTC format, for example
 * <code>s3://import-bucket/FSxLustre20181105T222312Z</code>.
 *
 * The Amazon S3 export bucket must be the same as the import bucket specified by <code>ImportPath</code>. If
 * you only specify a bucket name, such as <code>s3://import-bucket</code>, you get a 1:1 mapping of file
 * system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on
 * export. If you provide a custom prefix in the export path, such as
 * <code>s3://import-bucket/[custom-optional-prefix]</code>, Amazon FSx exports the contents of your file
 * system to that export prefix in the Amazon S3 bucket.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withExportPath(String exportPath) {
setExportPath(exportPath);
return this;
}
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
* @param importedFileChunkSize
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum
* amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a
* single file can be striped across is limited by the total number of disks that make up the file
* system.
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects
* have a maximum size of 5 TB.
*/
public void setImportedFileChunkSize(Integer importedFileChunkSize) {
this.importedFileChunkSize = importedFileChunkSize;
}
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
* @return (Optional) For files imported from a data repository, this value determines the stripe count and maximum
* amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a
* single file can be striped across is limited by the total number of disks that make up the file
* system.
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3
* objects have a maximum size of 5 TB.
*/
public Integer getImportedFileChunkSize() {
return this.importedFileChunkSize;
}
/**
*
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount
* of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be
* striped across is limited by the total number of disks that make up the file system.
*
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a
* maximum size of 5 TB.
*
*
* @param importedFileChunkSize
* (Optional) For files imported from a data repository, this value determines the stripe count and maximum
* amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a
* single file can be striped across is limited by the total number of disks that make up the file
* system.
*
* The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects
* have a maximum size of 5 TB.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withImportedFileChunkSize(Integer importedFileChunkSize) {
setImportedFileChunkSize(importedFileChunkSize);
return this;
}
/**
*
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
*
*
* @param deploymentType
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
* @see LustreDeploymentType
*/
public void setDeploymentType(String deploymentType) {
this.deploymentType = deploymentType;
}
/**
*
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
*
*
 * @return (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need
 * temporary storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides
 * in-transit encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
* @see LustreDeploymentType
*/
public String getDeploymentType() {
return this.deploymentType;
}
/**
*
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
*
*
* @param deploymentType
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LustreDeploymentType
*/
public CreateFileSystemLustreConfiguration withDeploymentType(String deploymentType) {
setDeploymentType(deploymentType);
return this;
}
/**
*
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
*
*
* @param deploymentType
 * (Optional) Choose <code>SCRATCH_1</code> and <code>SCRATCH_2</code> deployment types when you need temporary
 * storage and shorter-term processing of data. The <code>SCRATCH_2</code> deployment type provides in-transit
 * encryption of data and higher burst throughput capacity than <code>SCRATCH_1</code>.
 *
 * Choose the <code>PERSISTENT_1</code> deployment type for longer-term storage and for workloads that require
 * encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
 *
 * Encryption of data in-transit is automatically enabled when you access a <code>SCRATCH_2</code> or
 * <code>PERSISTENT_1</code> file system from Amazon EC2 instances that support this feature. (Default =
 * <code>SCRATCH_1</code>)
 *
 * Encryption of data in-transit for <code>SCRATCH_2</code> and <code>PERSISTENT_1</code> deployment types is
 * supported when accessed from supported instance types in supported AWS Regions. To learn more, see Encrypting
 * Data in Transit.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LustreDeploymentType
*/
public CreateFileSystemLustreConfiguration withDeploymentType(LustreDeploymentType deploymentType) {
this.deploymentType = deploymentType.toString();
return this;
}
/**
*
 * Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write throughput for
* each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system
* storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, provisioning 50
* MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
*
* Valid values are 50, 100, 200.
*
*
* @param perUnitStorageThroughput
 * Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write
* throughput for each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by
* multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB
* file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput.
* You pay for the amount of throughput that you provision.
*
* Valid values are 50, 100, 200.
*/
public void setPerUnitStorageThroughput(Integer perUnitStorageThroughput) {
this.perUnitStorageThroughput = perUnitStorageThroughput;
}
/**
*
 * Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write throughput for
* each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system
* storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, provisioning 50
* MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
*
* Valid values are 50, 100, 200.
*
*
 * @return Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write
* throughput for each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by
* multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB
* file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system
* throughput. You pay for the amount of throughput that you provision.
*
* Valid values are 50, 100, 200.
*/
public Integer getPerUnitStorageThroughput() {
return this.perUnitStorageThroughput;
}
/**
*
 * Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write throughput for
* each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system
* storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, provisioning 50
* MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput. You pay for the amount of
* throughput that you provision.
*
*
* Valid values are 50, 100, 200.
*
*
* @param perUnitStorageThroughput
 * Required for the <code>PERSISTENT_1</code> deployment type, describes the amount of read and write
* throughput for each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by
* multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB
* file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 117 MB/s of file system throughput.
* You pay for the amount of throughput that you provision.
*
* Valid values are 50, 100, 200.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateFileSystemLustreConfiguration withPerUnitStorageThroughput(Integer perUnitStorageThroughput) {
setPerUnitStorageThroughput(perUnitStorageThroughput);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getWeeklyMaintenanceStartTime() != null)
sb.append("WeeklyMaintenanceStartTime: ").append(getWeeklyMaintenanceStartTime()).append(",");
if (getImportPath() != null)
sb.append("ImportPath: ").append(getImportPath()).append(",");
if (getExportPath() != null)
sb.append("ExportPath: ").append(getExportPath()).append(",");
if (getImportedFileChunkSize() != null)
sb.append("ImportedFileChunkSize: ").append(getImportedFileChunkSize()).append(",");
if (getDeploymentType() != null)
sb.append("DeploymentType: ").append(getDeploymentType()).append(",");
if (getPerUnitStorageThroughput() != null)
sb.append("PerUnitStorageThroughput: ").append(getPerUnitStorageThroughput());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CreateFileSystemLustreConfiguration == false)
return false;
CreateFileSystemLustreConfiguration other = (CreateFileSystemLustreConfiguration) obj;
if (other.getWeeklyMaintenanceStartTime() == null ^ this.getWeeklyMaintenanceStartTime() == null)
return false;
if (other.getWeeklyMaintenanceStartTime() != null && other.getWeeklyMaintenanceStartTime().equals(this.getWeeklyMaintenanceStartTime()) == false)
return false;
if (other.getImportPath() == null ^ this.getImportPath() == null)
return false;
if (other.getImportPath() != null && other.getImportPath().equals(this.getImportPath()) == false)
return false;
if (other.getExportPath() == null ^ this.getExportPath() == null)
return false;
if (other.getExportPath() != null && other.getExportPath().equals(this.getExportPath()) == false)
return false;
if (other.getImportedFileChunkSize() == null ^ this.getImportedFileChunkSize() == null)
return false;
if (other.getImportedFileChunkSize() != null && other.getImportedFileChunkSize().equals(this.getImportedFileChunkSize()) == false)
return false;
if (other.getDeploymentType() == null ^ this.getDeploymentType() == null)
return false;
if (other.getDeploymentType() != null && other.getDeploymentType().equals(this.getDeploymentType()) == false)
return false;
if (other.getPerUnitStorageThroughput() == null ^ this.getPerUnitStorageThroughput() == null)
return false;
if (other.getPerUnitStorageThroughput() != null && other.getPerUnitStorageThroughput().equals(this.getPerUnitStorageThroughput()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getWeeklyMaintenanceStartTime() == null) ? 0 : getWeeklyMaintenanceStartTime().hashCode());
hashCode = prime * hashCode + ((getImportPath() == null) ? 0 : getImportPath().hashCode());
hashCode = prime * hashCode + ((getExportPath() == null) ? 0 : getExportPath().hashCode());
hashCode = prime * hashCode + ((getImportedFileChunkSize() == null) ? 0 : getImportedFileChunkSize().hashCode());
hashCode = prime * hashCode + ((getDeploymentType() == null) ? 0 : getDeploymentType().hashCode());
hashCode = prime * hashCode + ((getPerUnitStorageThroughput() == null) ? 0 : getPerUnitStorageThroughput().hashCode());
return hashCode;
}
@Override
public CreateFileSystemLustreConfiguration clone() {
try {
return (CreateFileSystemLustreConfiguration) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.fsx.model.transform.CreateFileSystemLustreConfigurationMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}