
com.amazonaws.services.sagemaker.model.InputConfig Maven / Gradle / Ivy


The AWS Java SDK for Amazon SageMaker module holds the client classes that are used for communicating with Amazon SageMaker Service

/*
 * Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.sagemaker.model;

import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * Contains information about the location of input model artifacts, the name and shape of the expected data inputs,
 * and the framework in which the model was trained.
 * </p>
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class InputConfig implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * The S3 path where the model artifacts, which result from model training, are stored. This path must point to a
     * single gzip compressed tar archive (.tar.gz suffix).
     * </p>
     */
    private String s3Uri;

    /**
     * <p>
     * Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The
     * data inputs are Framework specific.
     * </p>
     * <ul>
     * <li>
     * <code>TensorFlow</code>: You must specify the name and shape (NHWC format) of the expected data inputs using a
     * dictionary format for your trained model. The dictionary formats required for the console and CLI are different.
     * <ul>
     * <li>Examples for one input: if using the console, <code>{"input":[1,1024,1024,3]}</code>; if using the CLI,
     * <code>{\"input\":[1,1024,1024,3]}</code></li>
     * <li>Examples for two inputs: if using the console, <code>{"data1": [1,28,28,1], "data2":[1,28,28,1]}</code>; if
     * using the CLI, <code>{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}</code></li>
     * </ul>
     * </li>
     * <li>
     * <code>KERAS</code>: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary
     * format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last)
     * format, <code>DataInputConfig</code> should be specified in NCHW (channel-first) format. The dictionary formats
     * required for the console and CLI are different.
     * <ul>
     * <li>Examples for one input: if using the console, <code>{"input_1":[1,3,224,224]}</code>; if using the CLI,
     * <code>{\"input_1\":[1,3,224,224]}</code></li>
     * <li>Examples for two inputs: if using the console,
     * <code>{"input_1": [1,3,224,224], "input_2":[1,3,224,224]}</code>; if using the CLI,
     * <code>{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]}</code></li>
     * </ul>
     * </li>
     * <li>
     * <code>MXNET/ONNX/DARKNET</code>: You must specify the name and shape (NCHW format) of the expected data inputs
     * in order using a dictionary format for your trained model. The dictionary formats required for the console and
     * CLI are different.
     * <ul>
     * <li>Examples for one input: if using the console, <code>{"data":[1,3,1024,1024]}</code>; if using the CLI,
     * <code>{\"data\":[1,3,1024,1024]}</code></li>
     * <li>Examples for two inputs: if using the console, <code>{"var1": [1,1,28,28], "var2":[1,1,28,28]}</code>; if
     * using the CLI, <code>{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}</code></li>
     * </ul>
     * </li>
     * <li>
     * <code>PyTorch</code>: You can either specify the name and shape (NCHW format) of expected data inputs in order
     * using a dictionary format for your trained model or you can specify the shape only using a list format. The
     * dictionary formats required for the console and CLI are different. The list formats for the console and CLI are
     * the same.
     * <ul>
     * <li>Examples for one input in dictionary format: if using the console, <code>{"input0":[1,3,224,224]}</code>; if
     * using the CLI, <code>{\"input0\":[1,3,224,224]}</code></li>
     * <li>Example for one input in list format: <code>[[1,3,224,224]]</code></li>
     * <li>Examples for two inputs in dictionary format: if using the console,
     * <code>{"input0":[1,3,224,224], "input1":[1,3,224,224]}</code>; if using the CLI,
     * <code>{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}</code></li>
     * <li>Example for two inputs in list format: <code>[[1,3,224,224], [1,3,224,224]]</code></li>
     * </ul>
     * </li>
     * <li>
     * <code>XGBOOST</code>: input data name and shape are not needed.</li>
     * </ul>
     * <p>
     * <code>DataInputConfig</code> supports the following parameters for <code>CoreML</code>
     * <code>TargetDevice</code> (ML Model format):
     * </p>
     * <ul>
     * <li>
     * <code>shape</code>: Input shape, for example <code>{"input_1": {"shape": [1,224,224,3]}}</code>. In addition to
     * static input shapes, CoreML converter supports Flexible input shapes:
     * <ul>
     * <li>Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some
     * specific interval in that dimension, for example: <code>{"input_1": {"shape": ["1..10", 224, 224, 3]}}</code></li>
     * <li>Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can
     * enumerate all supported input shapes, for example:
     * <code>{"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}}</code></li>
     * </ul>
     * </li>
     * <li>
     * <code>default_shape</code>: Default input shape. You can set a default shape during conversion for both Range
     * Dimension and Enumerated Shapes. For example
     * <code>{"input_1": {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, 224, 3]}}</code></li>
     * <li>
     * <code>type</code>: Input type. Allowed values: <code>Image</code> and <code>Tensor</code>. By default, the
     * converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image.
     * Image input type requires additional input parameters such as bias and scale.</li>
     * <li>
     * <code>bias</code>: If the input type is an Image, you need to provide the bias vector.</li>
     * <li>
     * <code>scale</code>: If the input type is an Image, you need to provide a scale factor.</li>
     * </ul>
     * <p>
     * CoreML <code>ClassifierConfig</code> parameters can be specified using <code>OutputConfig</code>
     * <code>CompilerOptions</code>. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion
     * examples:
     * </p>
     * <ul>
     * <li>
     * Tensor type input:
     * <code>"DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3]}}</code></li>
     * <li>
     * Tensor type input without input name (PyTorch):
     * <code>"DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}]</code></li>
     * <li>
     * Image type input:
     * <code>"DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}}</code>
     * and <code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></li>
     * <li>
     * Image type input without input name (PyTorch):
     * <code>"DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}]</code>
     * and <code>"CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}</code></li>
     * </ul>
     * <p>
     * Depending on the model format, <code>DataInputConfig</code> requires the following parameters for
     * <code>ml_eia2</code> <code>OutputConfig:TargetDevice</code>.
     * </p>
     * <ul>
     * <li>
     * For TensorFlow models saved in the SavedModel format, specify the input names from
     * <code>signature_def_key</code> and the input model shapes for <code>DataInputConfig</code>. Specify the
     * <code>signature_def_key</code> in <code>OutputConfig:CompilerOptions</code> if the model does not use
     * TensorFlow's default signature def key. For example:
     * <code>"DataInputConfig": {"inputs": [1, 224, 224, 3]}</code> and
     * <code>"CompilerOptions": {"signature_def_key": "serving_custom"}</code></li>
     * <li>
     * For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in
     * <code>DataInputConfig</code> and the output tensor names for <code>output_names</code> in
     * <code>OutputConfig:CompilerOptions</code>. For example:
     * <code>"DataInputConfig": {"input_tensor:0": [1, 224, 224, 3]}</code> and
     * <code>"CompilerOptions": {"output_names": ["output_tensor:0"]}</code></li>
     * </ul>
     */
    private String dataInputConfig;
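
    // Illustrative sketch (not part of the generated source): DataInputConfig values written as
    // Java string literals, taken from the framework examples documented above. Note the escaped
    // quotes required in Java source, similar to the CLI form of each example.
    //
    //     // TensorFlow: single NHWC input named "input"
    //     String tfInput = "{\"input\":[1,1024,1024,3]}";
    //
    //     // PyTorch: shape-only list format (no input name)
    //     String torchInput = "[[1,3,224,224]]";
    //
    //     // CoreML: enumerated (flexible) shapes with a default shape
    //     String coreMlInput =
    //             "{\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}";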

    /**
     * <p>
     * Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * </p>
     */
    private String framework;

    /**
     * <p>
     * Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow and
     * TensorFlow Lite frameworks.
     * </p>
     * <p>
     * For information about framework versions supported for cloud targets and edge devices, see Cloud Supported
     * Instance Types and Frameworks and Edge Supported Frameworks.
     * </p>
     */
    private String frameworkVersion;

    /**
     * <p>
     * The S3 path where the model artifacts, which result from model training, are stored. This path must point to a
     * single gzip compressed tar archive (.tar.gz suffix).
     * </p>
     *
     * @param s3Uri
     *        The S3 path where the model artifacts, which result from model training, are stored. This path must point
     *        to a single gzip compressed tar archive (.tar.gz suffix).
     */
    public void setS3Uri(String s3Uri) {
        this.s3Uri = s3Uri;
    }

    /**
     * <p>
     * The S3 path where the model artifacts, which result from model training, are stored. This path must point to a
     * single gzip compressed tar archive (.tar.gz suffix).
     * </p>
     *
     * @return The S3 path where the model artifacts, which result from model training, are stored. This path must
     *         point to a single gzip compressed tar archive (.tar.gz suffix).
     */
    public String getS3Uri() {
        return this.s3Uri;
    }

    /**
     * <p>
     * The S3 path where the model artifacts, which result from model training, are stored. This path must point to a
     * single gzip compressed tar archive (.tar.gz suffix).
     * </p>
     *
     * @param s3Uri
     *        The S3 path where the model artifacts, which result from model training, are stored. This path must point
     *        to a single gzip compressed tar archive (.tar.gz suffix).
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InputConfig withS3Uri(String s3Uri) {
        setS3Uri(s3Uri);
        return this;
    }
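
    // Illustrative sketch (not part of the generated source): building an InputConfig with the
    // fluent withers, for example when assembling a compilation job request. The bucket, key,
    // framework version, and input shape below are hypothetical placeholders.
    //
    //     InputConfig inputConfig = new InputConfig()
    //             .withS3Uri("s3://example-bucket/models/model.tar.gz")
    //             .withFramework("TENSORFLOW")
    //             .withFrameworkVersion("2.9")
    //             .withDataInputConfig("{\"input\":[1,1024,1024,3]}");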

    /**
     * <p>
     * Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The
     * data inputs are Framework specific. See the <code>dataInputConfig</code> field documentation above for the
     * framework-specific dictionary formats and for the CoreML and <code>ml_eia2</code> parameters.
     * </p>
     *
     * @param dataInputConfig
     *        Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary
     *        form. The data inputs are Framework specific.
     */
    public void setDataInputConfig(String dataInputConfig) {
        this.dataInputConfig = dataInputConfig;
    }
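
    // Illustrative sketch (not part of the generated source): DataInputConfig for a TensorFlow
    // SavedModel targeted at ml_eia2, using the shape from the example above. The matching
    // signature_def_key would be supplied via OutputConfig:CompilerOptions, which lives on a
    // separate model class.
    //
    //     InputConfig eiaInput = new InputConfig();
    //     eiaInput.setDataInputConfig("{\"inputs\": [1, 224, 224, 3]}");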

    /**
     * <p>
     * Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The
     * data inputs are Framework specific. See the <code>dataInputConfig</code> field documentation above for the
     * framework-specific dictionary formats and for the CoreML and <code>ml_eia2</code> parameters.
     * </p>
     *
     * @return Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary
     *         form. The data inputs are Framework specific.
     */
    public String getDataInputConfig() {
        return this.dataInputConfig;
    }
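
    // Illustrative sketch (not part of the generated source): a CoreML Image-type DataInputConfig
    // built from the documentation example above; bias and scale are supplied because the input
    // type is Image.
    //
    //     String coreMlImageInput =
    //             "{\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3],"
    //                     + " \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}";
    //     InputConfig coreMlConfig = new InputConfig().withDataInputConfig(coreMlImageInput);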

      * Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The * data inputs are Framework specific. *

      *
        *
      • *

        * TensorFlow: You must specify the name and shape (NHWC format) of the expected data inputs using a * dictionary format for your trained model. The dictionary formats required for the console and CLI are different. *

        *
          *
        • *

          * Examples for one input: *

          *
            *
          • *

            * If using the console, {"input":[1,1024,1024,3]} *

            *
          • *
          • *

            * If using the CLI, {\"input\":[1,1024,1024,3]} *

            *
          • *
          *
        • *
        • *

          * Examples for two inputs: *

          *
            *
          • *

            * If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]} *

            *
          • *
          • *

            * If using the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]} *

            *
          • *
          *
        • *
        *
      • *
      • *

        * KERAS: You must specify the name and shape (NCHW format) of expected data inputs using a dictionary * format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC (channel-last) * format, DataInputConfig should be specified in NCHW (channel-first) format. The dictionary formats * required for the console and CLI are different. *

        *
          *
        • *

          * Examples for one input: *

          *
            *
          • *

            * If using the console, {"input_1":[1,3,224,224]} *

            *
          • *
          • *

            * If using the CLI, {\"input_1\":[1,3,224,224]} *

            *
          • *
          *
        • *
        • *

          * Examples for two inputs: *

          *
            *
          • *

            * If using the console, {"input_1": [1,3,224,224], "input_2":[1,3,224,224]} *

            *
          • *
          • *

            * If using the CLI, {\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} *

            *
          • *
          *
        • *
        *
      • *
      • *

        * MXNET/ONNX/DARKNET: You must specify the name and shape (NCHW format) of the expected data inputs in * order using a dictionary format for your trained model. The dictionary formats required for the console and CLI * are different. *

        *
          *
        • *

          * Examples for one input: *

          *
            *
          • *

            * If using the console, {"data":[1,3,1024,1024]} *

            *
          • *
          • *

            * If using the CLI, {\"data\":[1,3,1024,1024]} *

            *
          • *
          *
        • *
        • *

          * Examples for two inputs: *

          *
            *
          • *

            * If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]} *

            *
          • *
          • *

            * If using the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} *

            *
          • *
          *
        • *
        *
      • *
      • *

        * PyTorch: You can either specify the name and shape (NCHW format) of expected data inputs in order * using a dictionary format for your trained model or you can specify the shape only using a list format. The * dictionary formats required for the console and CLI are different. The list formats for the console and CLI are * the same. *

        *
          *
        • *

          * Examples for one input in dictionary format: *

          *
            *
          • *

            * If using the console, {"input0":[1,3,224,224]} *

            *
          • *
          • *

            * If using the CLI, {\"input0\":[1,3,224,224]} *

            *
          • *
          *
        • *
        • *

          * Example for one input in list format: [[1,3,224,224]] *

          *
        • *
        • *

          * Examples for two inputs in dictionary format: *

          *
            *
          • *

            * If using the console, {"input0":[1,3,224,224], "input1":[1,3,224,224]} *

            *
          • *
          • *

            * If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]} *

            *
          • *
          *
        • *
        • *

          * Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]] *

          *
        • *
        *
      • *
      • *

        * XGBOOST: input data name and shape are not needed. *

        *
      • *
      *

      * DataInputConfig supports the following parameters for CoreML TargetDevice * (ML Model format): *

      *
        *
      • *

        * shape: Input shape, for example {"input_1": {"shape": [1,224,224,3]}}. In addition to * static input shapes, CoreML converter supports Flexible input shapes: *

        *
          *
        • *

          * Range Dimension. You can use the Range Dimension feature if you know the input shape will be within some specific * interval in that dimension, for example: {"input_1": {"shape": ["1..10", 224, 224, 3]}} *

          *
        • *
        • *

          * Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You can enumerate * all supported input shapes, for example: * {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}} *

          *
        • *
        *
      • *
      • *

        * default_shape: Default input shape. You can set a default shape during conversion for both Range * Dimension and Enumerated Shapes. For example * {"input_1": {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, 224, 3]}} *

        *
      • *
      • *

        * type: Input type. Allowed values: Image and Tensor. By default, the * converter generates an ML Model with inputs of type Tensor (MultiArray). User can set input type to be Image. * Image input type requires additional input parameters such as bias and scale. *

        *
      • *
      • *

        * bias: If the input type is an Image, you need to provide the bias vector. *

        *
      • *
      • *

        * scale: If the input type is an Image, you need to provide a scale factor. *

        *
      • *
      *

      * CoreML ClassifierConfig parameters can be specified using OutputConfig * CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion * examples: *

      *
        *
      • *

        * Tensor type input: *

        *
          *
        • *

          * "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3]}} *

          *
        • *
        *
      • *
      • *

        * Tensor type input without input name (PyTorch): *

        *
          *
        • *

          * "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}] *

          *
        • *
        *
      • *
      • *

        * Image type input: *

        *
          *
        • *

          * "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}} *

          *
        • *
        • *

          * "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"} *

          *
        • *
        *
      • *
      • *

        * Image type input without input name (PyTorch): *

        *
          *
        • *

          * "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}] *

          *
        • *
        • *

          * "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"} *

          *
        • *
        *
      • *
      *

      * Depending on the model format, DataInputConfig requires the following parameters for * ml_eia2 OutputConfig:TargetDevice. *

      *
        *
      • *

        * For TensorFlow models saved in the SavedModel format, specify the input names from signature_def_key * and the input model shapes for DataInputConfig. Specify the signature_def_key in OutputConfig:CompilerOptions if the model does not use TensorFlow's default signature def * key. For example: *

        *
          *
        • *

          * "DataInputConfig": {"inputs": [1, 224, 224, 3]} *

          *
        • *
        • *

          * "CompilerOptions": {"signature_def_key": "serving_custom"} *

          *
        • *
        *
      • *
      • *

        * For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in * DataInputConfig and the output tensor names for output_names in OutputConfig:CompilerOptions . For example: *

        *
          *
        • *

          * "DataInputConfig": {"input_tensor:0": [1, 224, 224, 3]} *

          *
        • *
        • *

          * "CompilerOptions": {"output_names": ["output_tensor:0"]} *

          *
        • *
        *
      • *
      * * @param dataInputConfig * Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary * form. The data inputs are Framework specific.

      *
        *
      • *

        * TensorFlow: You must specify the name and shape (NHWC format) of the expected data inputs * using a dictionary format for your trained model. The dictionary formats required for the console and CLI * are different. *

        *
          *
        • *

          * Examples for one input: *

          *
            *
          • *

            * If using the console, {"input":[1,1024,1024,3]} *

            *
          • *
          • *

            * If using the CLI, {\"input\":[1,1024,1024,3]} *

            *
          • *
          *
        • *
        • *

          * Examples for two inputs: *

          *
            *
          • *

            * If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]} *

            *
          • *
          • *

            * If using the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]} *

            *
          • *
          *
        • *
        *
      • *
      • *

        * KERAS: You must specify the name and shape (NCHW format) of expected data inputs using a * dictionary format for your trained model. Note that while Keras model artifacts should be uploaded in NHWC * (channel-last) format, DataInputConfig should be specified in NCHW (channel-first) format. * The dictionary formats required for the console and CLI are different. *

        *
          *
        • *

          * Examples for one input: *

          *
            *
          • *

            * If using the console, {"input_1":[1,3,224,224]} *

            *
          • *
          • *

            * If using the CLI, {\"input_1\":[1,3,224,224]} *

            *
          • *
          *
        • *
        • *

          * Examples for two inputs: *

          *
            *
          • *

            * If using the console, {"input_1": [1,3,224,224], "input_2":[1,3,224,224]} *

            *
          • *
          • *

            * If using the CLI, {\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} *

            *
          • *
          *
        • *
        *
      • *
      • *

     *        - MXNET/ONNX/DARKNET: You must specify the name and shape (NCHW format) of the expected data inputs
     *          in order using a dictionary format for your trained model. The dictionary formats required for the
     *          console and CLI are different.
     *          - Examples for one input:
     *            - If using the console, {"data":[1,3,1024,1024]}
     *            - If using the CLI, {\"data\":[1,3,1024,1024]}
     *          - Examples for two inputs:
     *            - If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]}
     *            - If using the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]}
     *        - PyTorch: You can either specify the name and shape (NCHW format) of expected data inputs in order
     *          using a dictionary format for your trained model or you can specify the shape only using a list
     *          format. The dictionary formats required for the console and CLI are different. The list formats
     *          for the console and CLI are the same.
     *          - Examples for one input in dictionary format:
     *            - If using the console, {"input0":[1,3,224,224]}
     *            - If using the CLI, {\"input0\":[1,3,224,224]}
     *          - Example for one input in list format: [[1,3,224,224]]
     *          - Examples for two inputs in dictionary format:
     *            - If using the console, {"input0":[1,3,224,224], "input1":[1,3,224,224]}
     *            - If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}
     *          - Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]]
     *        - XGBOOST: input data name and shape are not needed.
     * 
     *        DataInputConfig supports the following parameters for CoreML TargetDevice (ML Model format):
     *        - shape: Input shape, for example {"input_1": {"shape": [1,224,224,3]}}. In addition to static input
     *          shapes, CoreML converter supports Flexible input shapes:
     *          - Range Dimension. You can use the Range Dimension feature if you know the input shape will be
     *            within some specific interval in that dimension, for example:
     *            {"input_1": {"shape": ["1..10", 224, 224, 3]}}
     *          - Enumerated shapes. Sometimes, the models are trained to work only on a select set of inputs. You
     *            can enumerate all supported input shapes, for example:
     *            {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}}
     *        - default_shape: Default input shape. You can set a default shape during conversion for both Range
     *          Dimension and Enumerated Shapes. For example
     *          {"input_1": {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, 224, 3]}}
     *        - type: Input type. Allowed values: Image and Tensor. By default, the converter generates an ML
     *          Model with inputs of type Tensor (MultiArray). You can set the input type to Image. The Image input
     *          type requires additional input parameters such as bias and scale.
     *        - bias: If the input type is an Image, you need to provide the bias vector.
     *        - scale: If the input type is an Image, you need to provide a scale factor.
     * 
     *        CoreML ClassifierConfig parameters can be specified using OutputConfig CompilerOptions. The CoreML
     *        converter supports TensorFlow and PyTorch models. CoreML conversion examples:
     *        - Tensor type input:
     *          - "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3]}}
     *        - Tensor type input without input name (PyTorch):
     *          - "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}]
     *        - Image type input:
     *          - "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}}
     *          - "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}
     *        - Image type input without input name (PyTorch):
     *          - "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}]
     *          - "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"}
     * 
     *        Depending on the model format, DataInputConfig requires the following parameters for ml_eia2
     *        OutputConfig:TargetDevice.
     *        - For TensorFlow models saved in the SavedModel format, specify the input names from
     *          signature_def_key and the input model shapes for DataInputConfig. Specify the signature_def_key in
     *          OutputConfig:CompilerOptions if the model does not use TensorFlow's default signature def key. For
     *          example:
     *          - "DataInputConfig": {"inputs": [1, 224, 224, 3]}
     *          - "CompilerOptions": {"signature_def_key": "serving_custom"}
     *        - For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes in
     *          DataInputConfig and the output tensor names for output_names in OutputConfig:CompilerOptions. For
     *          example:
     *          - "DataInputConfig": {"input_tensor:0": [1, 224, 224, 3]}
     *          - "CompilerOptions": {"output_names": ["output_tensor:0"]}
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InputConfig withDataInputConfig(String dataInputConfig) {
        setDataInputConfig(dataInputConfig);
        return this;
    }
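
    /**
     * Illustrative usage sketch only (not part of the generated SDK source): assembles an InputConfig for a
     * TensorFlow model using the dictionary-format DataInputConfig described above. The S3 path, input name, and
     * shape are hypothetical placeholders; substitute values that match your own trained model.
     */
    private static InputConfig exampleTensorFlowInputConfig() {
        // Single NHWC input named "input", as in the TensorFlow examples above. DataInputConfig is passed as a
        // JSON string, so the quotes inside it are escaped for Java. The bucket and key are made up.
        return new InputConfig()
                .withS3Uri("s3://amzn-s3-demo-bucket/tensorflow/model.tar.gz")
                .withDataInputConfig("{\"input\":[1,1024,1024,3]}")
                .withFramework("TENSORFLOW");
    }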

    /**
     * Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * 
     * @param framework
     *        Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * @see Framework
     */
    public void setFramework(String framework) {
        this.framework = framework;
    }

    /**
     * Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * 
     * @return Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * @see Framework
     */
    public String getFramework() {
        return this.framework;
    }

    /**
     * Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * 
     * @param framework
     *        Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see Framework
     */
    public InputConfig withFramework(String framework) {
        setFramework(framework);
        return this;
    }

    /**
     * Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * 
     * @param framework
     *        Identifies the framework in which the model was trained. For example: TENSORFLOW.
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see Framework
     */
    public InputConfig withFramework(Framework framework) {
        this.framework = framework.toString();
        return this;
    }
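
    /**
     * Illustrative usage sketch only (not part of the generated SDK source): uses the Framework enum overload
     * defined above together with the PyTorch list-format DataInputConfig described earlier. The S3 path and
     * shape are hypothetical placeholders.
     */
    private static InputConfig examplePyTorchInputConfig() {
        // One NCHW input given in list format; the list form omits input names. Bucket and key are made up.
        return new InputConfig()
                .withS3Uri("s3://amzn-s3-demo-bucket/pytorch/model.tar.gz")
                .withDataInputConfig("[[1,3,224,224]]")
                .withFramework(Framework.PYTORCH);
    }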

    /**
     * Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow
     * and TensorFlow Lite frameworks.
     * 
     * For information about framework versions supported for cloud targets and edge devices, see Cloud Supported
     * Instance Types and Frameworks and Edge Supported Frameworks.
     * 
     * @param frameworkVersion
     *        Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch,
     *        TensorFlow and TensorFlow Lite frameworks.
     *        For information about framework versions supported for cloud targets and edge devices, see Cloud
     *        Supported Instance Types and Frameworks and Edge Supported Frameworks.
     */
    public void setFrameworkVersion(String frameworkVersion) {
        this.frameworkVersion = frameworkVersion;
    }
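
    /**
     * Illustrative usage sketch only (not part of the generated SDK source): a possible InputConfig for a
     * TensorFlow model intended for CoreML conversion, using the flexible-shape parameters (Range Dimension plus
     * default_shape) described above. The S3 path, shapes, and framework version are hypothetical placeholders.
     */
    private static InputConfig exampleCoreMlInputConfig() {
        // The batch dimension may range from 1 to 10; default_shape pins it to 1 for conversion.
        String dataInputConfig = "{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}";
        return new InputConfig()
                .withS3Uri("s3://amzn-s3-demo-bucket/coreml/model.tar.gz")   // made-up bucket and key
                .withDataInputConfig(dataInputConfig)
                .withFramework(Framework.TENSORFLOW)
                .withFrameworkVersion("2.9");                                // assumed version, adjust as needed
    }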

    /**
     * Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow
     * and TensorFlow Lite frameworks.
     * 
     * For information about framework versions supported for cloud targets and edge devices, see Cloud Supported
     * Instance Types and Frameworks and Edge Supported Frameworks.
     * 
     * @return Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch,
     *         TensorFlow and TensorFlow Lite frameworks.
     *         For information about framework versions supported for cloud targets and edge devices, see Cloud
     *         Supported Instance Types and Frameworks and Edge Supported Frameworks.
     */
    public String getFrameworkVersion() {
        return this.frameworkVersion;
    }

    /**
     * Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch, TensorFlow
     * and TensorFlow Lite frameworks.
     * 
     * For information about framework versions supported for cloud targets and edge devices, see Cloud Supported
     * Instance Types and Frameworks and Edge Supported Frameworks.
     * 
     * @param frameworkVersion
     *        Specifies the framework version to use. This API field is only supported for the MXNet, PyTorch,
     *        TensorFlow and TensorFlow Lite frameworks.
     *        For information about framework versions supported for cloud targets and edge devices, see Cloud
     *        Supported Instance Types and Frameworks and Edge Supported Frameworks.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InputConfig withFrameworkVersion(String frameworkVersion) {
        setFrameworkVersion(frameworkVersion);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data
     * will be redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getS3Uri() != null)
            sb.append("S3Uri: ").append(getS3Uri()).append(",");
        if (getDataInputConfig() != null)
            sb.append("DataInputConfig: ").append(getDataInputConfig()).append(",");
        if (getFramework() != null)
            sb.append("Framework: ").append(getFramework()).append(",");
        if (getFrameworkVersion() != null)
            sb.append("FrameworkVersion: ").append(getFrameworkVersion());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof InputConfig == false)
            return false;
        InputConfig other = (InputConfig) obj;
        if (other.getS3Uri() == null ^ this.getS3Uri() == null)
            return false;
        if (other.getS3Uri() != null && other.getS3Uri().equals(this.getS3Uri()) == false)
            return false;
        if (other.getDataInputConfig() == null ^ this.getDataInputConfig() == null)
            return false;
        if (other.getDataInputConfig() != null && other.getDataInputConfig().equals(this.getDataInputConfig()) == false)
            return false;
        if (other.getFramework() == null ^ this.getFramework() == null)
            return false;
        if (other.getFramework() != null && other.getFramework().equals(this.getFramework()) == false)
            return false;
        if (other.getFrameworkVersion() == null ^ this.getFrameworkVersion() == null)
            return false;
        if (other.getFrameworkVersion() != null && other.getFrameworkVersion().equals(this.getFrameworkVersion()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getS3Uri() == null) ? 0 : getS3Uri().hashCode());
        hashCode = prime * hashCode + ((getDataInputConfig() == null) ? 0 : getDataInputConfig().hashCode());
        hashCode = prime * hashCode + ((getFramework() == null) ? 0 : getFramework().hashCode());
        hashCode = prime * hashCode + ((getFrameworkVersion() == null) ? 0 : getFrameworkVersion().hashCode());
        return hashCode;
    }

    @Override
    public InputConfig clone() {
        try {
            return (InputConfig) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.sagemaker.model.transform.InputConfigMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}




