
com.amazonaws.services.lookoutforvision.model.StartModelRequest Maven / Gradle / Ivy


The AWS Java SDK for Amazon Lookout for Vision module holds the client classes that are used for communicating with the Amazon Lookout for Vision service.
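Below is a minimal usage sketch, not taken from this artifact's documentation: it assumes the AWS SDK for Java v1 client and builder names AmazonLookoutforVision and AmazonLookoutforVisionClientBuilder for this package, and the region, project name, and model version are placeholder values.

// Hedged sketch: the client/builder names, the region, and all literal values below are
// assumptions for illustration; only StartModelRequest itself comes from this package.
import com.amazonaws.services.lookoutforvision.AmazonLookoutforVision;
import com.amazonaws.services.lookoutforvision.AmazonLookoutforVisionClientBuilder;
import com.amazonaws.services.lookoutforvision.model.StartModelRequest;
import com.amazonaws.services.lookoutforvision.model.StartModelResult;

public class StartModelExample {
    public static void main(String[] args) {
        AmazonLookoutforVision client = AmazonLookoutforVisionClientBuilder.standard()
                .withRegion("us-east-1")       // hypothetical region
                .build();

        // Build the request with the fluent with* methods defined on StartModelRequest.
        StartModelRequest request = new StartModelRequest()
                .withProjectName("my-project") // hypothetical project name
                .withModelVersion("1")         // hypothetical model version
                .withMinInferenceUnits(1);     // one inference unit = 1 hour of processing capacity

        StartModelResult result = client.startModel(request);
        System.out.println(result);            // prints the response, which includes the model's hosting status
    }
}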

A newer version of this artifact is available: 1.12.772.
/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.lookoutforvision.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class StartModelRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * The name of the project that contains the model that you want to start.
     */
    private String projectName;

    /**
     * The version of the model that you want to start.
     */
    private String modelVersion;

    /**
     * The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a
     * higher number to increase the TPS throughput of your model. You are charged for the number of inference units
     * that you use.
     */
    private Integer minInferenceUnits;

    /**
     * ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose the value
     * to pass. For example, an issue might prevent you from getting a response from StartModel. In this case, safely
     * retry your call to StartModel by using the same ClientToken parameter value.
     *
     * If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This prevents
     * retries after a network error from making multiple start requests. You'll need to provide your own value for
     * other use cases.
     *
     * An error occurs if the other input parameters are not the same as in the first request. Using a different value
     * for ClientToken is considered a new call to StartModel. An idempotency token is active for 8 hours.
     */
    private String clientToken;

    /**
     * The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon
     * Lookout for Vision doesn't auto-scale the model.
     */
    private Integer maxInferenceUnits;

    /**
     * The name of the project that contains the model that you want to start.
     *
     * @param projectName
     *        The name of the project that contains the model that you want to start.
     */
    public void setProjectName(String projectName) {
        this.projectName = projectName;
    }

    /**
     * The name of the project that contains the model that you want to start.
     *
     * @return The name of the project that contains the model that you want to start.
     */
    public String getProjectName() {
        return this.projectName;
    }

    /**
     * The name of the project that contains the model that you want to start.
     *
     * @param projectName
     *        The name of the project that contains the model that you want to start.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public StartModelRequest withProjectName(String projectName) {
        setProjectName(projectName);
        return this;
    }

    /**
     * The version of the model that you want to start.
     *
     * @param modelVersion
     *        The version of the model that you want to start.
     */
    public void setModelVersion(String modelVersion) {
        this.modelVersion = modelVersion;
    }

    /**
     * The version of the model that you want to start.
     *
     * @return The version of the model that you want to start.
     */
    public String getModelVersion() {
        return this.modelVersion;
    }

    /**
     * The version of the model that you want to start.
     *
     * @param modelVersion
     *        The version of the model that you want to start.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public StartModelRequest withModelVersion(String modelVersion) {
        setModelVersion(modelVersion);
        return this;
    }

    /**
     * The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a
     * higher number to increase the TPS throughput of your model. You are charged for the number of inference units
     * that you use.
     *
     * @param minInferenceUnits
     *        The minimum number of inference units to use. A single inference unit represents 1 hour of processing.
     *        Use a higher number to increase the TPS throughput of your model. You are charged for the number of
     *        inference units that you use.
     */
    public void setMinInferenceUnits(Integer minInferenceUnits) {
        this.minInferenceUnits = minInferenceUnits;
    }

    /**
     * The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a
     * higher number to increase the TPS throughput of your model. You are charged for the number of inference units
     * that you use.
     *
     * @return The minimum number of inference units to use. A single inference unit represents 1 hour of processing.
     *         Use a higher number to increase the TPS throughput of your model. You are charged for the number of
     *         inference units that you use.
     */
    public Integer getMinInferenceUnits() {
        return this.minInferenceUnits;
    }

    /**
     * The minimum number of inference units to use. A single inference unit represents 1 hour of processing. Use a
     * higher number to increase the TPS throughput of your model. You are charged for the number of inference units
     * that you use.
     *
     * @param minInferenceUnits
     *        The minimum number of inference units to use. A single inference unit represents 1 hour of processing.
     *        Use a higher number to increase the TPS throughput of your model. You are charged for the number of
     *        inference units that you use.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public StartModelRequest withMinInferenceUnits(Integer minInferenceUnits) {
        setMinInferenceUnits(minInferenceUnits);
        return this;
    }

    /**
     * ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose the value
     * to pass. For example, an issue might prevent you from getting a response from StartModel. In this case, safely
     * retry your call to StartModel by using the same ClientToken parameter value.
     *
     * If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This prevents
     * retries after a network error from making multiple start requests. You'll need to provide your own value for
     * other use cases.
     *
     * An error occurs if the other input parameters are not the same as in the first request. Using a different value
     * for ClientToken is considered a new call to StartModel. An idempotency token is active for 8 hours.
     *
     * @param clientToken
     *        ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose
     *        the value to pass. For example, an issue might prevent you from getting a response from StartModel. In
     *        this case, safely retry your call to StartModel by using the same ClientToken parameter value.
     *
     *        If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This
     *        prevents retries after a network error from making multiple start requests. You'll need to provide your
     *        own value for other use cases.
     *
     *        An error occurs if the other input parameters are not the same as in the first request. Using a
     *        different value for ClientToken is considered a new call to StartModel. An idempotency token is active
     *        for 8 hours.
     */
    public void setClientToken(String clientToken) {
        this.clientToken = clientToken;
    }

    /**
     * ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose the value
     * to pass. For example, an issue might prevent you from getting a response from StartModel. In this case, safely
     * retry your call to StartModel by using the same ClientToken parameter value.
     *
     * If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This prevents
     * retries after a network error from making multiple start requests. You'll need to provide your own value for
     * other use cases.
     *
     * An error occurs if the other input parameters are not the same as in the first request. Using a different value
     * for ClientToken is considered a new call to StartModel. An idempotency token is active for 8 hours.
     *
     * @return ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose
     *         the value to pass. For example, an issue might prevent you from getting a response from StartModel. In
     *         this case, safely retry your call to StartModel by using the same ClientToken parameter value.
     *
     *         If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This
     *         prevents retries after a network error from making multiple start requests. You'll need to provide your
     *         own value for other use cases.
     *
     *         An error occurs if the other input parameters are not the same as in the first request. Using a
     *         different value for ClientToken is considered a new call to StartModel. An idempotency token is active
     *         for 8 hours.
     */
    public String getClientToken() {
        return this.clientToken;
    }

    /**
     * ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose the value
     * to pass. For example, an issue might prevent you from getting a response from StartModel. In this case, safely
     * retry your call to StartModel by using the same ClientToken parameter value.
     *
     * If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This prevents
     * retries after a network error from making multiple start requests. You'll need to provide your own value for
     * other use cases.
     *
     * An error occurs if the other input parameters are not the same as in the first request. Using a different value
     * for ClientToken is considered a new call to StartModel. An idempotency token is active for 8 hours.
     *
     * @param clientToken
     *        ClientToken is an idempotency token that ensures a call to StartModel completes only once. You choose
     *        the value to pass. For example, an issue might prevent you from getting a response from StartModel. In
     *        this case, safely retry your call to StartModel by using the same ClientToken parameter value.
     *
     *        If you don't supply a value for ClientToken, the AWS SDK you are using inserts a value for you. This
     *        prevents retries after a network error from making multiple start requests. You'll need to provide your
     *        own value for other use cases.
     *
     *        An error occurs if the other input parameters are not the same as in the first request. Using a
     *        different value for ClientToken is considered a new call to StartModel. An idempotency token is active
     *        for 8 hours.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public StartModelRequest withClientToken(String clientToken) {
        setClientToken(clientToken);
        return this;
    }

    /**
     * The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon
     * Lookout for Vision doesn't auto-scale the model.
     *
     * @param maxInferenceUnits
     *        The maximum number of inference units to use for auto-scaling the model. If you don't specify a value,
     *        Amazon Lookout for Vision doesn't auto-scale the model.
     */
    public void setMaxInferenceUnits(Integer maxInferenceUnits) {
        this.maxInferenceUnits = maxInferenceUnits;
    }

    /**
     * The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon
     * Lookout for Vision doesn't auto-scale the model.
     *
     * @return The maximum number of inference units to use for auto-scaling the model. If you don't specify a value,
     *         Amazon Lookout for Vision doesn't auto-scale the model.
     */
    public Integer getMaxInferenceUnits() {
        return this.maxInferenceUnits;
    }

    /**
     * The maximum number of inference units to use for auto-scaling the model. If you don't specify a value, Amazon
     * Lookout for Vision doesn't auto-scale the model.
     *
     * @param maxInferenceUnits
     *        The maximum number of inference units to use for auto-scaling the model. If you don't specify a value,
     *        Amazon Lookout for Vision doesn't auto-scale the model.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public StartModelRequest withMaxInferenceUnits(Integer maxInferenceUnits) {
        setMaxInferenceUnits(maxInferenceUnits);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will
     * be redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getProjectName() != null)
            sb.append("ProjectName: ").append(getProjectName()).append(",");
        if (getModelVersion() != null)
            sb.append("ModelVersion: ").append(getModelVersion()).append(",");
        if (getMinInferenceUnits() != null)
            sb.append("MinInferenceUnits: ").append(getMinInferenceUnits()).append(",");
        if (getClientToken() != null)
            sb.append("ClientToken: ").append(getClientToken()).append(",");
        if (getMaxInferenceUnits() != null)
            sb.append("MaxInferenceUnits: ").append(getMaxInferenceUnits());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof StartModelRequest == false)
            return false;
        StartModelRequest other = (StartModelRequest) obj;
        if (other.getProjectName() == null ^ this.getProjectName() == null)
            return false;
        if (other.getProjectName() != null && other.getProjectName().equals(this.getProjectName()) == false)
            return false;
        if (other.getModelVersion() == null ^ this.getModelVersion() == null)
            return false;
        if (other.getModelVersion() != null && other.getModelVersion().equals(this.getModelVersion()) == false)
            return false;
        if (other.getMinInferenceUnits() == null ^ this.getMinInferenceUnits() == null)
            return false;
        if (other.getMinInferenceUnits() != null && other.getMinInferenceUnits().equals(this.getMinInferenceUnits()) == false)
            return false;
        if (other.getClientToken() == null ^ this.getClientToken() == null)
            return false;
        if (other.getClientToken() != null && other.getClientToken().equals(this.getClientToken()) == false)
            return false;
        if (other.getMaxInferenceUnits() == null ^ this.getMaxInferenceUnits() == null)
            return false;
        if (other.getMaxInferenceUnits() != null && other.getMaxInferenceUnits().equals(this.getMaxInferenceUnits()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime * hashCode + ((getProjectName() == null) ? 0 : getProjectName().hashCode());
        hashCode = prime * hashCode + ((getModelVersion() == null) ? 0 : getModelVersion().hashCode());
        hashCode = prime * hashCode + ((getMinInferenceUnits() == null) ? 0 : getMinInferenceUnits().hashCode());
        hashCode = prime * hashCode + ((getClientToken() == null) ? 0 : getClientToken().hashCode());
        hashCode = prime * hashCode + ((getMaxInferenceUnits() == null) ? 0 : getMaxInferenceUnits().hashCode());
        return hashCode;
    }

    @Override
    public StartModelRequest clone() {
        return (StartModelRequest) super.clone();
    }
}
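The ClientToken documentation above suggests one concrete pattern: supply your own token and retry with the identical request after a network error. The sketch below illustrates that; the AmazonLookoutforVision client type and all literal values are assumptions, and only StartModelRequest and its with* methods come from this class.

// Hedged sketch of the ClientToken retry pattern described in the Javadoc above.
// The AmazonLookoutforVision client type name is an assumption; the literal values are placeholders.
import com.amazonaws.SdkClientException;
import com.amazonaws.services.lookoutforvision.AmazonLookoutforVision;
import com.amazonaws.services.lookoutforvision.model.StartModelRequest;

import java.util.UUID;

public class StartModelIdempotentRetry {

    static void startWithRetry(AmazonLookoutforVision client) {
        // Supply your own token so that a retry is recognized as the same request.
        String clientToken = UUID.randomUUID().toString();

        StartModelRequest request = new StartModelRequest()
                .withProjectName("my-project")   // hypothetical project name
                .withModelVersion("1")           // hypothetical model version
                .withMinInferenceUnits(1)
                .withMaxInferenceUnits(3)        // optional: allows auto-scaling up to 3 units
                .withClientToken(clientToken);

        try {
            client.startModel(request);
        } catch (SdkClientException e) {
            // A network-level failure may leave the outcome unknown. Retrying with the SAME
            // request (same ClientToken and same other parameters) is treated by the service
            // as the original call, so the model is started only once.
            client.startModel(request);
        }
    }
}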



