
// com.amazonaws.services.machinelearning.model.S3DataSpec (packaged in aws-java-sdk-osgi)
/*
* Copyright 2011-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not
* use this file except in compliance with the License. A copy of the License is
* located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.machinelearning.model;
import java.io.Serializable;
/**
 * <p>
 * Describes the data specification of a <code>DataSource</code>.
 * </p>
 */
public class S3DataSpec implements Serializable, Cloneable {
/**
 * <p>
 * The location of the data file(s) used by a <code>DataSource</code>. The URI
 * specifies a data file or an Amazon Simple Storage Service (Amazon S3)
 * directory or bucket containing data files.
 * </p>
 */
private String dataLocationS3;
/**
 * <p>
 * A JSON string that represents the splitting and rearrangement processing to
 * be applied to a <code>DataSource</code>. If the
 * <code>DataRearrangement</code> parameter is not provided, all of the input
 * data is used to create the <code>Datasource</code>.
 * </p>
 * <p>
 * There are multiple parameters that control what data is used to create a
 * datasource:
 * </p>
 * <ul>
 * <li>
 * <p>
 * <code>percentBegin</code> / <code>percentEnd</code> - indicate the beginning
 * and end of the range of the data used to create the datasource. If you do
 * not include <code>percentBegin</code> and <code>percentEnd</code>, Amazon ML
 * includes all of the data when creating the datasource.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>complement</code> - instructs Amazon ML to use the data that is NOT
 * included in the range of <code>percentBegin</code> to
 * <code>percentEnd</code>. This is useful for creating complementary
 * (non-overlapping) datasources for training and evaluation: use the same
 * <code>percentBegin</code> and <code>percentEnd</code> values along with the
 * <code>complement</code> parameter. For example, the following two
 * datasources do not share any data; the first has 25 percent of the data and
 * the second has 75 percent:
 * </p>
 * <p>
 * Datasource for evaluation:
 * <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code>
 * </p>
 * <p>
 * Datasource for training:
 * <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code>
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>strategy</code> - controls how Amazon ML splits the data. The default,
 * <code>sequential</code>, takes all of the data records between
 * <code>percentBegin</code> and <code>percentEnd</code> in the order that the
 * records appear in the input data, for example:
 * </p>
 * <p>
 * Datasource for evaluation:
 * <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}</code>
 * </p>
 * <p>
 * Datasource for training:
 * <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}</code>
 * </p>
 * <p>
 * To randomly split the input data, set <code>strategy</code> to
 * <code>random</code> and provide a seed string (for example, the S3 path to
 * your data). Amazon ML assigns each row a pseudo-random number between 0 and
 * 100 (derived from the seed string and the row's byte offset, so changing the
 * data results in a different split) and selects the rows whose number falls
 * between <code>percentBegin</code> and <code>percentEnd</code>. Any existing
 * ordering is preserved. Random splitting ensures that variables in the
 * training and evaluation data are distributed similarly, which is useful when
 * the input data has an implicit sort order that would otherwise produce
 * non-similar training and evaluation datasources, for example:
 * </p>
 * <p>
 * Datasource for evaluation:
 * <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}</code>
 * </p>
 * <p>
 * Datasource for training:
 * <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}</code>
 * </p>
 * </li>
 * </ul>
 */
private String dataRearrangement;
/**
 * <p>
 * A JSON string that represents the schema for an Amazon S3
 * <code>DataSource</code>. The <code>DataSchema</code> defines the structure
 * of the observation data in the data file(s) referenced in the
 * <code>DataSource</code>.
 * </p>
 * <p>
 * You must provide either the <code>DataSchema</code> or the
 * <code>DataSchemaLocationS3</code>.
 * </p>
 * <p>
 * Define your <code>DataSchema</code> as a series of key-value pairs.
 * <code>attributes</code> and <code>excludedVariableNames</code> have an array
 * of key-value pairs for their value. Use the following format to define your
 * <code>DataSchema</code>:
 * </p>
 *
 * <pre>
 * { "version": "1.0",
 *   "recordAnnotationFieldName": "F1",
 *   "recordWeightFieldName": "F2",
 *   "targetFieldName": "F3",
 *   "dataFormat": "CSV",
 *   "dataFileContainsHeader": true,
 *   "attributes": [
 *     { "fieldName": "F1", "fieldType": "TEXT" },
 *     { "fieldName": "F2", "fieldType": "NUMERIC" },
 *     { "fieldName": "F3", "fieldType": "CATEGORICAL" },
 *     { "fieldName": "F4", "fieldType": "NUMERIC" },
 *     { "fieldName": "F5", "fieldType": "CATEGORICAL" },
 *     { "fieldName": "F6", "fieldType": "TEXT" },
 *     { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
 *     { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
 *   "excludedVariableNames": [ "F6" ] }
 * </pre>
 */
private String dataSchema;
/**
 * <p>
 * Describes the schema location in Amazon S3. You must provide either the
 * <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.
 * </p>
 */
private String dataSchemaLocationS3;
/**
 * <p>
 * Sets the location of the data file(s) used by a <code>DataSource</code>.
 * The URI specifies a data file or an Amazon Simple Storage Service
 * (Amazon S3) directory or bucket containing data files.
 * </p>
 *
 * @param dataLocationS3
 *        The location of the data file(s) used by a <code>DataSource</code>.
 *        The URI specifies a data file or an Amazon Simple Storage Service
 *        (Amazon S3) directory or bucket containing data files.
 */
public void setDataLocationS3(String dataLocationS3) {
this.dataLocationS3 = dataLocationS3;
}
/**
 * <p>
 * Returns the location of the data file(s) used by a <code>DataSource</code>.
 * The URI specifies a data file or an Amazon Simple Storage Service
 * (Amazon S3) directory or bucket containing data files.
 * </p>
 *
 * @return The location of the data file(s) used by a <code>DataSource</code>.
 *         The URI specifies a data file or an Amazon Simple Storage Service
 *         (Amazon S3) directory or bucket containing data files.
 */
public String getDataLocationS3() {
    return dataLocationS3;
}
/**
 * <p>
 * Fluent variant of {@link #setDataLocationS3(String)}: stores the location
 * of the data file(s) used by a <code>DataSource</code> and returns this
 * object so that method calls can be chained together.
 * </p>
 *
 * @param dataLocationS3
 *        The location of the data file(s) used by a <code>DataSource</code>.
 *        The URI specifies a data file or an Amazon Simple Storage Service
 *        (Amazon S3) directory or bucket containing data files.
 * @return This object, to allow method chaining.
 */
public S3DataSpec withDataLocationS3(String dataLocationS3) {
    this.dataLocationS3 = dataLocationS3;
    return this;
}
/**
 * <p>
 * Sets the JSON string that represents the splitting and rearrangement
 * processing to be applied to a <code>DataSource</code>. If the
 * <code>DataRearrangement</code> parameter is not provided, all of the input
 * data is used to create the <code>Datasource</code>.
 * </p>
 * <p>
 * The JSON string may contain the following parameters inside a
 * <code>"splitting"</code> object:
 * </p>
 * <ul>
 * <li>
 * <p>
 * <code>percentBegin</code> / <code>percentEnd</code> - the range of the
 * input data used to create the datasource. If both are omitted, Amazon ML
 * includes all of the input data.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>complement</code> - use the data NOT included in the
 * <code>percentBegin</code> to <code>percentEnd</code> range; useful for
 * creating complementary (non-overlapping) training and evaluation
 * datasources with the same range values. For example, evaluation:
 * <code>{"splitting":{"percentBegin":0, "percentEnd":25}}</code> and
 * training:
 * <code>{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}</code>.
 * </p>
 * </li>
 * <li>
 * <p>
 * <code>strategy</code> - how Amazon ML splits the data. The default,
 * <code>sequential</code>, takes the records between
 * <code>percentBegin</code> and <code>percentEnd</code> in input order.
 * Setting it to <code>random</code> along with a <code>randomSeed</code>
 * string (for example, the S3 path of the data) assigns each row a
 * pseudo-random number between 0 and 100, derived from the seed and the
 * row's byte offset, and selects the rows whose number falls in the range;
 * ordering is preserved, and training and evaluation data end up distributed
 * similarly even when the input has an implicit sort order. For example:
 * <code>{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}</code>.
 * </p>
 * </li>
 * </ul>
 *
 * @param dataRearrangement
 *        A JSON string that represents the splitting and rearrangement
 *        processing to be applied to a <code>DataSource</code>. If not
 *        provided, all of the input data is used to create the
 *        <code>Datasource</code>.
 */
public void setDataRearrangement(String dataRearrangement) {
this.dataRearrangement = dataRearrangement;
}
/**
 * <p>
 * Returns the JSON string that represents the splitting and rearrangement
 * processing to be applied to a <code>DataSource</code>. If the
 * <code>DataRearrangement</code> parameter is not provided, all of the input
 * data is used to create the <code>Datasource</code>. See
 * {@link #setDataRearrangement(String)} for the supported splitting
 * parameters (<code>percentBegin</code>, <code>percentEnd</code>,
 * <code>complement</code>, <code>strategy</code>, <code>randomSeed</code>).
 * </p>
 *
 * @return The JSON string that represents the splitting and rearrangement
 *         processing to be applied to a <code>DataSource</code>, or
 *         {@code null} if none was set.
 */
public String getDataRearrangement() {
    return dataRearrangement;
}
/**
*
* A JSON string that represents the splitting and rearrangement processing
* to be applied to a DataSource
. If the
* DataRearrangement
parameter is not provided, all of the
* input data is used to create the Datasource
.
*
*
* There are multiple parameters that control what data is used to create a
* datasource:
*
*
* -
*
* percentBegin
*
*
* Use percentBegin
to indicate the beginning of the range of
* the data used to create the Datasource. If you do not include
* percentBegin
and percentEnd
, Amazon ML includes
* all of the data when creating the datasource.
*
*
* -
*
* percentEnd
*
*
* Use percentEnd
to indicate the end of the range of the data
* used to create the Datasource. If you do not include
* percentBegin
and percentEnd
, Amazon ML includes
* all of the data when creating the datasource.
*
*
* -
*
* complement
*
*
* The complement
parameter instructs Amazon ML to use the data
* that is not included in the range of percentBegin
to
* percentEnd
to create a datasource. The
* complement
parameter is useful if you need to create
* complementary datasources for training and evaluation. To create a
* complementary datasource, use the same values for
* percentBegin
and percentEnd
, along with the
* complement
parameter.
*
*
* For example, the following two datasources do not share any data, and can
* be used to train and evaluate a model. The first datasource has 25
* percent of the data, and the second one has 75 percent of the data.
*
*
* Datasource for evaluation:
* {"splitting":{"percentBegin":0, "percentEnd":25}}
*
*
* Datasource for training:
* {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
*
*
* -
*
* strategy
*
*
* To change how Amazon ML splits the data for a datasource, use the
* strategy
parameter.
*
*
* The default value for the strategy
parameter is
* sequential
, meaning that Amazon ML takes all of the data
* records between the percentBegin
and percentEnd
* parameters for the datasource, in the order that the records appear in
* the input data.
*
*
* The following two DataRearrangement
lines are examples of
* sequentially ordered training and evaluation datasources:
*
*
* Datasource for evaluation:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
*
*
* Datasource for training:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
*
*
* To randomly split the input data into the proportions indicated by the
* percentBegin and percentEnd parameters, set the strategy
* parameter to random
and provide a string that is used as the
* seed value for the random data splitting (for example, you can use the S3
* path to your data as the random seed string). If you choose the random
* split strategy, Amazon ML assigns each row of data a pseudo-random number
* between 0 and 100, and then selects the rows that have an assigned number
* between percentBegin
and percentEnd
.
* Pseudo-random numbers are assigned using both the input seed string value
* and the byte offset as a seed, so changing the data results in a
* different split. Any existing ordering is preserved. The random splitting
* strategy ensures that variables in the training and evaluation data are
* distributed similarly. It is useful in the cases where the input data may
* have an implicit sort order, which would otherwise result in training and
* evaluation datasources containing non-similar data records.
*
*
* The following two DataRearrangement
lines are examples of
* non-sequentially ordered training and evaluation datasources:
*
*
* Datasource for evaluation:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
*
*
* Datasource for training:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
*
*
*
*
* @param dataRearrangement
* A JSON string that represents the splitting and rearrangement
* processing to be applied to a DataSource
. If the
* DataRearrangement
parameter is not provided, all of
* the input data is used to create the Datasource
.
*
* There are multiple parameters that control what data is used to
* create a datasource:
*
*
* -
*
* percentBegin
*
*
* Use percentBegin
to indicate the beginning of the
* range of the data used to create the Datasource. If you do not
* include percentBegin
and percentEnd
,
* Amazon ML includes all of the data when creating the datasource.
*
*
* -
*
* percentEnd
*
*
* Use percentEnd
to indicate the end of the range of
* the data used to create the Datasource. If you do not include
* percentBegin
and percentEnd
, Amazon ML
* includes all of the data when creating the datasource.
*
*
* -
*
* complement
*
*
* The complement
parameter instructs Amazon ML to use
* the data that is not included in the range of
* percentBegin
to percentEnd
to create a
* datasource. The complement
parameter is useful if you
* need to create complementary datasources for training and
* evaluation. To create a complementary datasource, use the same
* values for percentBegin
and percentEnd
,
* along with the complement
parameter.
*
*
* For example, the following two datasources do not share any data,
* and can be used to train and evaluate a model. The first
* datasource has 25 percent of the data, and the second one has 75
* percent of the data.
*
*
* Datasource for evaluation:
* {"splitting":{"percentBegin":0, "percentEnd":25}}
*
*
* Datasource for training:
* {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
*
*
* -
*
* strategy
*
*
* To change how Amazon ML splits the data for a datasource, use the
* strategy
parameter.
*
*
* The default value for the strategy
parameter is
* sequential
, meaning that Amazon ML takes all of the
* data records between the percentBegin
and
* percentEnd
parameters for the datasource, in the
* order that the records appear in the input data.
*
*
* The following two DataRearrangement
lines are
* examples of sequentially ordered training and evaluation
* datasources:
*
*
* Datasource for evaluation:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
*
*
* Datasource for training:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
*
*
* To randomly split the input data into the proportions indicated by
* the percentBegin and percentEnd parameters, set the
* strategy
parameter to random
and provide
* a string that is used as the seed value for the random data
* splitting (for example, you can use the S3 path to your data as
* the random seed string). If you choose the random split strategy,
* Amazon ML assigns each row of data a pseudo-random number between
* 0 and 100, and then selects the rows that have an assigned number
* between percentBegin
and percentEnd
.
* Pseudo-random numbers are assigned using both the input seed
* string value and the byte offset as a seed, so changing the data
* results in a different split. Any existing ordering is preserved.
* The random splitting strategy ensures that variables in the
* training and evaluation data are distributed similarly. It is
* useful in the cases where the input data may have an implicit sort
* order, which would otherwise result in training and evaluation
* datasources containing non-similar data records.
*
*
* The following two DataRearrangement
lines are
* examples of non-sequentially ordered training and evaluation
* datasources:
*
*
* Datasource for evaluation:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}
*
*
* Datasource for training:
* {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
*
*
* @return Returns a reference to this object so that method calls can be
* chained together.
*/
public S3DataSpec withDataRearrangement(String dataRearrangement) {
    // Fluent variant of setDataRearrangement: assign and return this for chaining.
    this.dataRearrangement = dataRearrangement;
    return this;
}
/**
 * <p>
 * A JSON string that represents the schema for an Amazon S3
 * <code>DataSource</code>. The <code>DataSchema</code> defines the
 * structure of the observation data in the data file(s) referenced in the
 * <code>DataSource</code>.
 * </p>
 * <p>
 * You must provide either the <code>DataSchema</code> or the
 * <code>DataSchemaLocationS3</code>.
 * </p>
 * <p>
 * Define your <code>DataSchema</code> as a series of key-value pairs.
 * <code>attributes</code> and <code>excludedVariableNames</code> have an
 * array of key-value pairs for their value. Use the following format to
 * define your <code>DataSchema</code>:
 * </p>
 * <p>
 * { "version": "1.0",
 * "recordAnnotationFieldName": "F1",
 * "recordWeightFieldName": "F2",
 * "targetFieldName": "F3",
 * "dataFormat": "CSV",
 * "dataFileContainsHeader": true,
 * "attributes": [
 * { "fieldName": "F1", "fieldType": "TEXT" },
 * { "fieldName": "F2", "fieldType": "NUMERIC" },
 * { "fieldName": "F3", "fieldType": "CATEGORICAL" },
 * { "fieldName": "F4", "fieldType": "NUMERIC" },
 * { "fieldName": "F5", "fieldType": "CATEGORICAL" },
 * { "fieldName": "F6", "fieldType": "TEXT" },
 * { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
 * { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
 * "excludedVariableNames": [ "F6" ] }
 * </p>
 * <p>
 * NOTE(review): the schema is stored as an opaque string; no validation of
 * the JSON is performed here — the service validates it at call time.
 * </p>
 *
 * @param dataSchema
 *        A JSON string that represents the schema for an Amazon S3
 *        <code>DataSource</code>. The <code>DataSchema</code> defines the
 *        structure of the observation data in the data file(s) referenced
 *        in the <code>DataSource</code>. You must provide either the
 *        <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.
 */
public void setDataSchema(String dataSchema) {
    this.dataSchema = dataSchema;
}
/**
 * <p>
 * A JSON string that represents the schema for an Amazon S3
 * <code>DataSource</code>. The <code>DataSchema</code> defines the
 * structure of the observation data in the data file(s) referenced in the
 * <code>DataSource</code>.
 * </p>
 * <p>
 * You must provide either the <code>DataSchema</code> or the
 * <code>DataSchemaLocationS3</code>.
 * </p>
 * <p>
 * Define your <code>DataSchema</code> as a series of key-value pairs.
 * <code>attributes</code> and <code>excludedVariableNames</code> have an
 * array of key-value pairs for their value. Use the following format to
 * define your <code>DataSchema</code>:
 * </p>
 * <p>
 * { "version": "1.0",
 * "recordAnnotationFieldName": "F1",
 * "recordWeightFieldName": "F2",
 * "targetFieldName": "F3",
 * "dataFormat": "CSV",
 * "dataFileContainsHeader": true,
 * "attributes": [
 * { "fieldName": "F1", "fieldType": "TEXT" },
 * { "fieldName": "F2", "fieldType": "NUMERIC" },
 * { "fieldName": "F3", "fieldType": "CATEGORICAL" },
 * { "fieldName": "F4", "fieldType": "NUMERIC" },
 * { "fieldName": "F5", "fieldType": "CATEGORICAL" },
 * { "fieldName": "F6", "fieldType": "TEXT" },
 * { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
 * { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
 * "excludedVariableNames": [ "F6" ] }
 * </p>
 *
 * @return A JSON string that represents the schema for an Amazon S3
 *         <code>DataSource</code>, or null if a schema was never set (e.g.
 *         when <code>DataSchemaLocationS3</code> is used instead).
 */
public String getDataSchema() {
    return this.dataSchema;
}
/**
 * <p>
 * Sets the JSON string that represents the schema for an Amazon S3
 * <code>DataSource</code> and returns this object for method chaining. The
 * <code>DataSchema</code> defines the structure of the observation data in
 * the data file(s) referenced in the <code>DataSource</code>.
 * </p>
 * <p>
 * You must provide either the <code>DataSchema</code> or the
 * <code>DataSchemaLocationS3</code>.
 * </p>
 * <p>
 * Define your <code>DataSchema</code> as a series of key-value pairs.
 * <code>attributes</code> and <code>excludedVariableNames</code> have an
 * array of key-value pairs for their value. For example:
 * </p>
 * <p>
 * { "version": "1.0",
 * "recordAnnotationFieldName": "F1",
 * "recordWeightFieldName": "F2",
 * "targetFieldName": "F3",
 * "dataFormat": "CSV",
 * "dataFileContainsHeader": true,
 * "attributes": [
 * { "fieldName": "F1", "fieldType": "TEXT" },
 * { "fieldName": "F2", "fieldType": "NUMERIC" },
 * { "fieldName": "F3", "fieldType": "CATEGORICAL" },
 * { "fieldName": "F4", "fieldType": "NUMERIC" },
 * { "fieldName": "F5", "fieldType": "CATEGORICAL" },
 * { "fieldName": "F6", "fieldType": "TEXT" },
 * { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
 * { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
 * "excludedVariableNames": [ "F6" ] }
 * </p>
 *
 * @param dataSchema
 *        A JSON string that represents the schema for an Amazon S3
 *        <code>DataSource</code>.
 * @return Returns a reference to this object so that method calls can be
 *         chained together.
 */
public S3DataSpec withDataSchema(String dataSchema) {
    // Fluent variant of setDataSchema: assign and return this for chaining.
    this.dataSchema = dataSchema;
    return this;
}
/**
 * <p>
 * Describes the schema location in Amazon S3. You must provide either the
 * <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.
 * </p>
 *
 * @param dataSchemaLocationS3
 *        Describes the schema location in Amazon S3. You must provide
 *        either the <code>DataSchema</code> or the
 *        <code>DataSchemaLocationS3</code>.
 */
public void setDataSchemaLocationS3(String dataSchemaLocationS3) {
    this.dataSchemaLocationS3 = dataSchemaLocationS3;
}
/**
 * <p>
 * Describes the schema location in Amazon S3. You must provide either the
 * <code>DataSchema</code> or the <code>DataSchemaLocationS3</code>.
 * </p>
 *
 * @return The schema location in Amazon S3, or null if an inline
 *         <code>DataSchema</code> is used instead.
 */
public String getDataSchemaLocationS3() {
    return this.dataSchemaLocationS3;
}
/**
 * <p>
 * Sets the schema location in Amazon S3 and returns this object for method
 * chaining. You must provide either the <code>DataSchema</code> or the
 * <code>DataSchemaLocationS3</code>.
 * </p>
 *
 * @param dataSchemaLocationS3
 *        Describes the schema location in Amazon S3.
 * @return Returns a reference to this object so that method calls can be
 *         chained together.
 */
public S3DataSpec withDataSchemaLocationS3(String dataSchemaLocationS3) {
    // Fluent variant of setDataSchemaLocationS3: assign and return this.
    this.dataSchemaLocationS3 = dataSchemaLocationS3;
    return this;
}
/**
 * Returns a string representation of this object; useful for testing and
 * debugging. Only non-null properties are included; the output has the form
 * {@code {DataLocationS3: ...,DataRearrangement: ...,DataSchema: ...,DataSchemaLocationS3: ...}}.
 *
 * @return A string representation of this object.
 *
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{");
    // Chain appends instead of concatenating inside append(): the original
    // built an intermediate String per field, defeating the StringBuilder.
    // The rendered output is identical.
    if (getDataLocationS3() != null) {
        sb.append("DataLocationS3: ").append(getDataLocationS3()).append(",");
    }
    if (getDataRearrangement() != null) {
        sb.append("DataRearrangement: ").append(getDataRearrangement()).append(",");
    }
    if (getDataSchema() != null) {
        sb.append("DataSchema: ").append(getDataSchema()).append(",");
    }
    if (getDataSchemaLocationS3() != null) {
        sb.append("DataSchemaLocationS3: ").append(getDataSchemaLocationS3());
    }
    sb.append("}");
    return sb.toString();
}
/**
 * Compares this S3DataSpec to another object for field-by-field equality
 * over all four properties.
 *
 * @param obj the object to compare against; may be null
 * @return true if obj is an S3DataSpec whose properties all equal this one's
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    // instanceof is false for null, so this also rejects null arguments.
    if (!(obj instanceof S3DataSpec)) {
        return false;
    }
    S3DataSpec other = (S3DataSpec) obj;
    return nullSafeEquals(getDataLocationS3(), other.getDataLocationS3())
            && nullSafeEquals(getDataRearrangement(), other.getDataRearrangement())
            && nullSafeEquals(getDataSchema(), other.getDataSchema())
            && nullSafeEquals(getDataSchemaLocationS3(), other.getDataSchemaLocationS3());
}

/**
 * Null-tolerant equality: true when both arguments are null, or when a is
 * non-null and a.equals(b).
 */
private static boolean nullSafeEquals(Object a, Object b) {
    return (a == null) ? (b == null) : a.equals(b);
}
/**
 * Computes a hash code over the four properties, consistent with
 * {@link #equals(Object)}: equal instances produce equal hash codes.
 *
 * @return hash code combining all properties with the conventional prime 31
 */
@Override
public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + nullSafeHash(getDataLocationS3());
    result = prime * result + nullSafeHash(getDataRearrangement());
    result = prime * result + nullSafeHash(getDataSchema());
    result = prime * result + nullSafeHash(getDataSchemaLocationS3());
    return result;
}

/**
 * Hash of o, or 0 when o is null.
 */
private static int nullSafeHash(Object o) {
    return (o == null) ? 0 : o.hashCode();
}
/**
 * Creates a copy of this S3DataSpec via {@link Object#clone()}. The copy is
 * shallow; the fields visible in this class are Strings (immutable), so the
 * copy is independent of this instance.
 *
 * @return a clone of this instance
 */
@Override
public S3DataSpec clone() {
    try {
        return (S3DataSpec) super.clone();
    } catch (CloneNotSupportedException e) {
        // The class implements Cloneable, so Object.clone() should never
        // throw; reaching this branch indicates a programming error.
        throw new IllegalStateException(
                "Got a CloneNotSupportedException from Object.clone() "
                        + "even though we're Cloneable!", e);
    }
}
}