com.amazonaws.services.machinelearning.model.S3DataSpec Maven / Gradle / Ivy

The AWS Java SDK for Amazon Machine Learning module holds the client classes that are used for communicating with the Amazon Machine Learning service.

/*
 * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.amazonaws.services.machinelearning.model;

import java.io.Serializable;

/**
 * Describes the data specification of a <code>DataSource</code>.
 */
public class S3DataSpec implements Serializable, Cloneable {

    /**
     * The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or
     * an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     */
    private String dataLocationS3;

    /**
     * Describes the splitting requirement of a <code>DataSource</code>.
     */
    private String dataRearrangement;

    /**
     * A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     * <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced
     * in the <code>DataSource</code>.
     * <p>
     * Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and
     * <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the
     * following format to define your <code>DataSchema</code>.
     * <pre>
     * { "version": "1.0",
     *   "recordAnnotationFieldName": "F1",
     *   "recordWeightFieldName": "F2",
     *   "targetFieldName": "F3",
     *   "dataFormat": "CSV",
     *   "dataFileContainsHeader": true,
     *   "attributes": [
     *     { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" },
     *     { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" },
     *     { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" },
     *     { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
     *     { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
     *   "excludedVariableNames": [ "F6" ] }
     * </pre>
     */
    private String dataSchema;

    /**
     * Describes the schema location in Amazon S3.
     */
    private String dataSchemaLocationS3;

    /**
     * The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or
     * an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     *
     * @param dataLocationS3
     *        The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data
     *        file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     */
    public void setDataLocationS3(String dataLocationS3) {
        this.dataLocationS3 = dataLocationS3;
    }

    /**
     * The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or
     * an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     *
     * @return The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data
     *         file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     */
    public String getDataLocationS3() {
        return this.dataLocationS3;
    }

    /**
     * The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data file or
     * an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     *
     * @param dataLocationS3
     *        The location of the data file(s) used by a <code>DataSource</code>. The URI specifies a data
     *        file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public S3DataSpec withDataLocationS3(String dataLocationS3) {
        setDataLocationS3(dataLocationS3);
        return this;
    }

    /**
     * Describes the splitting requirement of a <code>DataSource</code>.
     *
     * @param dataRearrangement
     *        Describes the splitting requirement of a <code>DataSource</code>.
     */
    public void setDataRearrangement(String dataRearrangement) {
        this.dataRearrangement = dataRearrangement;
    }

    /**
     * Describes the splitting requirement of a <code>DataSource</code>.
     *
     * @return Describes the splitting requirement of a <code>DataSource</code>.
     */
    public String getDataRearrangement() {
        return this.dataRearrangement;
    }

    /**
     * Describes the splitting requirement of a <code>DataSource</code>.
     *
     * @param dataRearrangement
     *        Describes the splitting requirement of a <code>DataSource</code>.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public S3DataSpec withDataRearrangement(String dataRearrangement) {
        setDataRearrangement(dataRearrangement);
        return this;
    }

    /**
     * A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     * <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced
     * in the <code>DataSource</code>.
     * <p>
     * Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and
     * <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the
     * following format to define your <code>DataSchema</code>.
     * <pre>
     * { "version": "1.0",
     *   "recordAnnotationFieldName": "F1",
     *   "recordWeightFieldName": "F2",
     *   "targetFieldName": "F3",
     *   "dataFormat": "CSV",
     *   "dataFileContainsHeader": true,
     *   "attributes": [
     *     { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" },
     *     { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" },
     *     { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" },
     *     { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
     *     { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
     *   "excludedVariableNames": [ "F6" ] }
     * </pre>
     *
     * @param dataSchema
     *        A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     *        <code>DataSchema</code> defines the structure of the observation data in the data file(s)
     *        referenced in the <code>DataSource</code>. Define your <code>DataSchema</code> as a series of
     *        key-value pairs; <code>attributes</code> and <code>excludedVariableNames</code> have an array
     *        of key-value pairs for their value, in the format shown above.
     */
    public void setDataSchema(String dataSchema) {
        this.dataSchema = dataSchema;
    }

    /**
     * A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     * <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced
     * in the <code>DataSource</code>.
     * <p>
     * Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and
     * <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the
     * following format to define your <code>DataSchema</code>.
     * <pre>
     * { "version": "1.0",
     *   "recordAnnotationFieldName": "F1",
     *   "recordWeightFieldName": "F2",
     *   "targetFieldName": "F3",
     *   "dataFormat": "CSV",
     *   "dataFileContainsHeader": true,
     *   "attributes": [
     *     { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" },
     *     { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" },
     *     { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" },
     *     { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
     *     { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
     *   "excludedVariableNames": [ "F6" ] }
     * </pre>
     *
     * @return A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     *         <code>DataSchema</code> defines the structure of the observation data in the data file(s)
     *         referenced in the <code>DataSource</code>, in the format shown above.
     */
    public String getDataSchema() {
        return this.dataSchema;
    }

    /**
     * A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     * <code>DataSchema</code> defines the structure of the observation data in the data file(s) referenced
     * in the <code>DataSource</code>.
     * <p>
     * Define your <code>DataSchema</code> as a series of key-value pairs. <code>attributes</code> and
     * <code>excludedVariableNames</code> have an array of key-value pairs for their value. Use the
     * following format to define your <code>DataSchema</code>.
     * <pre>
     * { "version": "1.0",
     *   "recordAnnotationFieldName": "F1",
     *   "recordWeightFieldName": "F2",
     *   "targetFieldName": "F3",
     *   "dataFormat": "CSV",
     *   "dataFileContainsHeader": true,
     *   "attributes": [
     *     { "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" },
     *     { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" },
     *     { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" },
     *     { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" },
     *     { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
     *   "excludedVariableNames": [ "F6" ] }
     * </pre>
     *
     * @param dataSchema
     *        A JSON string that represents the schema for an Amazon S3 <code>DataSource</code>. The
     *        <code>DataSchema</code> defines the structure of the observation data in the data file(s)
     *        referenced in the <code>DataSource</code>, in the format shown above.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public S3DataSpec withDataSchema(String dataSchema) {
        setDataSchema(dataSchema);
        return this;
    }

    /**
     * Describes the schema location in Amazon S3.
     *
     * @param dataSchemaLocationS3
     *        Describes the schema location in Amazon S3.
     */
    public void setDataSchemaLocationS3(String dataSchemaLocationS3) {
        this.dataSchemaLocationS3 = dataSchemaLocationS3;
    }

    /**
     * Describes the schema location in Amazon S3.
     *
     * @return Describes the schema location in Amazon S3.
     */
    public String getDataSchemaLocationS3() {
        return this.dataSchemaLocationS3;
    }

    /**
     * Describes the schema location in Amazon S3.
     *
     * @param dataSchemaLocationS3
     *        Describes the schema location in Amazon S3.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public S3DataSpec withDataSchemaLocationS3(String dataSchemaLocationS3) {
        setDataSchemaLocationS3(dataSchemaLocationS3);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and debugging.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getDataLocationS3() != null)
            sb.append("DataLocationS3: " + getDataLocationS3() + ",");
        if (getDataRearrangement() != null)
            sb.append("DataRearrangement: " + getDataRearrangement() + ",");
        if (getDataSchema() != null)
            sb.append("DataSchema: " + getDataSchema() + ",");
        if (getDataSchemaLocationS3() != null)
            sb.append("DataSchemaLocationS3: " + getDataSchemaLocationS3());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof S3DataSpec == false)
            return false;
        S3DataSpec other = (S3DataSpec) obj;
        if (other.getDataLocationS3() == null ^ this.getDataLocationS3() == null)
            return false;
        if (other.getDataLocationS3() != null
                && other.getDataLocationS3().equals(this.getDataLocationS3()) == false)
            return false;
        if (other.getDataRearrangement() == null ^ this.getDataRearrangement() == null)
            return false;
        if (other.getDataRearrangement() != null
                && other.getDataRearrangement().equals(this.getDataRearrangement()) == false)
            return false;
        if (other.getDataSchema() == null ^ this.getDataSchema() == null)
            return false;
        if (other.getDataSchema() != null
                && other.getDataSchema().equals(this.getDataSchema()) == false)
            return false;
        if (other.getDataSchemaLocationS3() == null ^ this.getDataSchemaLocationS3() == null)
            return false;
        if (other.getDataSchemaLocationS3() != null
                && other.getDataSchemaLocationS3().equals(this.getDataSchemaLocationS3()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getDataLocationS3() == null) ? 0 : getDataLocationS3().hashCode());
        hashCode = prime * hashCode + ((getDataRearrangement() == null) ? 0 : getDataRearrangement().hashCode());
        hashCode = prime * hashCode + ((getDataSchema() == null) ? 0 : getDataSchema().hashCode());
        hashCode = prime * hashCode + ((getDataSchemaLocationS3() == null) ? 0 : getDataSchemaLocationS3().hashCode());
        return hashCode;
    }

    @Override
    public S3DataSpec clone() {
        try {
            return (S3DataSpec) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException(
                    "Got a CloneNotSupportedException from Object.clone() even though we're Cloneable!", e);
        }
    }
}
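For context, below is a minimal usage sketch showing how an S3DataSpec is typically populated and passed when creating a DataSource. It assumes the AmazonMachineLearningClientBuilder, CreateDataSourceFromS3Request, and CreateDataSourceFromS3Result classes from the same SDK module (the builder is present in 1.11.x builds); the bucket names, IDs, and the DataRearrangement JSON are illustrative placeholders, not values from this file.

import com.amazonaws.services.machinelearning.AmazonMachineLearning;
import com.amazonaws.services.machinelearning.AmazonMachineLearningClientBuilder;
import com.amazonaws.services.machinelearning.model.CreateDataSourceFromS3Request;
import com.amazonaws.services.machinelearning.model.CreateDataSourceFromS3Result;
import com.amazonaws.services.machinelearning.model.S3DataSpec;

public class S3DataSpecExample {
    public static void main(String[] args) {
        // Describe where the observation data and its schema live, and how the data
        // should be split. All S3 URIs and JSON values here are placeholders.
        S3DataSpec dataSpec = new S3DataSpec()
                .withDataLocationS3("s3://my-example-bucket/banking.csv")
                .withDataSchemaLocationS3("s3://my-example-bucket/banking.csv.schema")
                // Illustrative rearrangement: use the first 70 percent of the data.
                .withDataRearrangement("{\"splitting\":{\"percentBegin\":0,\"percentEnd\":70}}");

        // Client built from the default credential and region provider chains
        // (assumes the 1.11.x client builder is on the classpath).
        AmazonMachineLearning client = AmazonMachineLearningClientBuilder.defaultClient();

        // Create a DataSource from the S3 data specification.
        CreateDataSourceFromS3Request request = new CreateDataSourceFromS3Request()
                .withDataSourceId("example-ds-001")
                .withDataSourceName("Banking training data")
                .withDataSpec(dataSpec)
                .withComputeStatistics(true);
        CreateDataSourceFromS3Result result = client.createDataSourceFromS3(request);
        System.out.println("Created DataSource: " + result.getDataSourceId());
    }
}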



