All Downloads are FREE. Search and download functionalities are using the official Maven repository.

com.amazonaws.services.neptunedata.model.CreateMLEndpointRequest Maven / Gradle / Ivy

Go to download

The AWS Java SDK for Amazon NeptuneData module holds the client classes that are used for communicating with Amazon NeptuneData Service

There is a newer version: 1.12.778
Show newest version
/*
 * Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.neptunedata.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.AmazonWebServiceRequest;

/**
 * 
 * @see AWS API
 *      Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateMLEndpointRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /** A unique identifier for the new inference endpoint. The default is an autogenerated timestamped name. */
    private String id;

    /**
     * The job Id of the completed model-training job that created the model the inference endpoint will point to.
     * Either this or {@code mlModelTransformJobId} must be supplied.
     */
    private String mlModelTrainingJobId;

    /**
     * The job Id of the completed model-transform job. Either this or {@code mlModelTrainingJobId} must be supplied.
     */
    private String mlModelTransformJobId;

    /**
     * When {@code true}, indicates that this is an update request. The default is {@code false}. Either
     * {@code mlModelTrainingJobId} or {@code mlModelTransformJobId} must be supplied.
     */
    private Boolean update;

    /**
     * The ARN of an IAM role providing Neptune access to SageMaker and Amazon S3 resources. It must be listed in the
     * DB cluster parameter group or an error will be thrown.
     */
    private String neptuneIamRoleArn;

    /**
     * Model type for training. By default the Neptune ML model is based on the {@code modelType} used in data
     * processing, but a different type can be specified here. The default is {@code rgcn} for heterogeneous graphs
     * (also the only valid value for them) and {@code kge} for knowledge graphs; knowledge graphs also accept
     * {@code transe}, {@code distmult}, and {@code rotate}.
     */
    private String modelName;

    /**
     * The type of Neptune ML instance used for online servicing. The default is {@code ml.m5.xlarge}; the right
     * choice depends on the task type, the graph size, and budget.
     */
    private String instanceType;

    /** The minimum number of Amazon EC2 instances deployed to the endpoint for prediction. The default is 1. */
    private Integer instanceCount;

    /**
     * The Amazon KMS key SageMaker uses to encrypt data on the storage volume attached to the ML compute instances
     * that run the training job. The default is None.
     */
    private String volumeEncryptionKMSKey;

    /**
     * Sets the unique identifier for the new inference endpoint.
     *
     * @param id endpoint identifier; the default is an autogenerated timestamped name
     */
    public void setId(String id) {
        this.id = id;
    }

    /**
     * Returns the unique identifier for the new inference endpoint.
     *
     * @return the endpoint identifier; the default is an autogenerated timestamped name
     */
    public String getId() {
        return this.id;
    }

    /**
     * Fluent variant of {@link #setId(String)}.
     *
     * @param id endpoint identifier; the default is an autogenerated timestamped name
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withId(String id) {
        setId(id);
        return this;
    }

    /**
     * Sets the job Id of the completed model-training job whose model the endpoint will serve.
     *
     * @param mlModelTrainingJobId training job Id; either this or {@code mlModelTransformJobId} must be supplied
     */
    public void setMlModelTrainingJobId(String mlModelTrainingJobId) {
        this.mlModelTrainingJobId = mlModelTrainingJobId;
    }

    /**
     * Returns the job Id of the completed model-training job.
     *
     * @return the training job Id, or {@code null} if not set
     */
    public String getMlModelTrainingJobId() {
        return this.mlModelTrainingJobId;
    }

    /**
     * Fluent variant of {@link #setMlModelTrainingJobId(String)}.
     *
     * @param mlModelTrainingJobId training job Id; either this or {@code mlModelTransformJobId} must be supplied
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withMlModelTrainingJobId(String mlModelTrainingJobId) {
        setMlModelTrainingJobId(mlModelTrainingJobId);
        return this;
    }

    /**
     * Sets the job Id of the completed model-transform job.
     *
     * @param mlModelTransformJobId transform job Id; either this or {@code mlModelTrainingJobId} must be supplied
     */
    public void setMlModelTransformJobId(String mlModelTransformJobId) {
        this.mlModelTransformJobId = mlModelTransformJobId;
    }

    /**
     * Returns the job Id of the completed model-transform job.
     *
     * @return the transform job Id, or {@code null} if not set
     */
    public String getMlModelTransformJobId() {
        return this.mlModelTransformJobId;
    }

    /**
     * Fluent variant of {@link #setMlModelTransformJobId(String)}.
     *
     * @param mlModelTransformJobId transform job Id; either this or {@code mlModelTrainingJobId} must be supplied
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withMlModelTransformJobId(String mlModelTransformJobId) {
        setMlModelTransformJobId(mlModelTransformJobId);
        return this;
    }

    /**
     * Sets whether this is an update request for an existing endpoint.
     *
     * @param update {@code true} for an update request; the default is {@code false}
     */
    public void setUpdate(Boolean update) {
        this.update = update;
    }

    /**
     * Returns whether this is an update request.
     *
     * @return {@code true} if this is an update request; the default is {@code false}
     */
    public Boolean getUpdate() {
        return this.update;
    }

    /**
     * Fluent variant of {@link #setUpdate(Boolean)}.
     *
     * @param update {@code true} for an update request; the default is {@code false}
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withUpdate(Boolean update) {
        setUpdate(update);
        return this;
    }

    /**
     * Returns whether this is an update request (alias of {@link #getUpdate()} following the boolean-getter
     * convention).
     *
     * @return {@code true} if this is an update request; the default is {@code false}
     */
    public Boolean isUpdate() {
        return this.update;
    }

    /**
     * Sets the ARN of the IAM role providing Neptune access to SageMaker and Amazon S3 resources.
     *
     * @param neptuneIamRoleArn role ARN; must be listed in the DB cluster parameter group or an error will be thrown
     */
    public void setNeptuneIamRoleArn(String neptuneIamRoleArn) {
        this.neptuneIamRoleArn = neptuneIamRoleArn;
    }

    /**
     * Returns the ARN of the IAM role providing Neptune access to SageMaker and Amazon S3 resources.
     *
     * @return the role ARN, or {@code null} if not set
     */
    public String getNeptuneIamRoleArn() {
        return this.neptuneIamRoleArn;
    }

    /**
     * Fluent variant of {@link #setNeptuneIamRoleArn(String)}.
     *
     * @param neptuneIamRoleArn role ARN; must be listed in the DB cluster parameter group or an error will be thrown
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withNeptuneIamRoleArn(String neptuneIamRoleArn) {
        setNeptuneIamRoleArn(neptuneIamRoleArn);
        return this;
    }

    /**
     * Sets the model type for training.
     *
     * @param modelName model type; defaults to the {@code modelType} used in data processing. Heterogeneous graphs
     *        accept only {@code rgcn}; knowledge graphs accept {@code kge}, {@code transe}, {@code distmult}, and
     *        {@code rotate}
     */
    public void setModelName(String modelName) {
        this.modelName = modelName;
    }

    /**
     * Returns the model type for training.
     *
     * @return the model type, or {@code null} if not set
     */
    public String getModelName() {
        return this.modelName;
    }

    /**
     * Fluent variant of {@link #setModelName(String)}.
     *
     * @param modelName model type; see {@link #setModelName(String)} for valid values
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withModelName(String modelName) {
        setModelName(modelName);
        return this;
    }

    /**
     * Sets the Neptune ML instance type used for online servicing.
     *
     * @param instanceType instance type; the default is {@code ml.m5.xlarge}
     */
    public void setInstanceType(String instanceType) {
        this.instanceType = instanceType;
    }

    /**
     * Returns the Neptune ML instance type used for online servicing.
     *
     * @return the instance type; the default is {@code ml.m5.xlarge}
     */
    public String getInstanceType() {
        return this.instanceType;
    }

    /**
     * Fluent variant of {@link #setInstanceType(String)}.
     *
     * @param instanceType instance type; the default is {@code ml.m5.xlarge}
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withInstanceType(String instanceType) {
        setInstanceType(instanceType);
        return this;
    }

    /**
     * Sets the minimum number of Amazon EC2 instances deployed to the endpoint for prediction.
     *
     * @param instanceCount minimum instance count; the default is 1
     */
    public void setInstanceCount(Integer instanceCount) {
        this.instanceCount = instanceCount;
    }

    /**
     * Returns the minimum number of Amazon EC2 instances deployed to the endpoint for prediction.
     *
     * @return the minimum instance count; the default is 1
     */
    public Integer getInstanceCount() {
        return this.instanceCount;
    }

    /**
     * Fluent variant of {@link #setInstanceCount(Integer)}.
     *
     * @param instanceCount minimum instance count; the default is 1
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withInstanceCount(Integer instanceCount) {
        setInstanceCount(instanceCount);
        return this;
    }

    /**
     * Sets the Amazon KMS key SageMaker uses to encrypt data on the storage volume attached to the ML compute
     * instances that run the training job.
     *
     * @param volumeEncryptionKMSKey KMS key; the default is None
     */
    public void setVolumeEncryptionKMSKey(String volumeEncryptionKMSKey) {
        this.volumeEncryptionKMSKey = volumeEncryptionKMSKey;
    }

    /**
     * Returns the Amazon KMS key used to encrypt the ML compute instances' storage volume.
     *
     * @return the KMS key; the default is None
     */
    public String getVolumeEncryptionKMSKey() {
        return this.volumeEncryptionKMSKey;
    }

    /**
     * Fluent variant of {@link #setVolumeEncryptionKMSKey(String)}.
     *
     * @param volumeEncryptionKMSKey KMS key; the default is None
     * @return this request, for call chaining
     */
    public CreateMLEndpointRequest withVolumeEncryptionKMSKey(String volumeEncryptionKMSKey) {
        setVolumeEncryptionKMSKey(volumeEncryptionKMSKey);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will
     * be redacted from this string using a placeholder value.
     *
     * @return a string representation of this object
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder buf = new StringBuilder("{");
        if (getId() != null)
            buf.append("Id: ").append(getId()).append(",");
        if (getMlModelTrainingJobId() != null)
            buf.append("MlModelTrainingJobId: ").append(getMlModelTrainingJobId()).append(",");
        if (getMlModelTransformJobId() != null)
            buf.append("MlModelTransformJobId: ").append(getMlModelTransformJobId()).append(",");
        if (getUpdate() != null)
            buf.append("Update: ").append(getUpdate()).append(",");
        if (getNeptuneIamRoleArn() != null)
            buf.append("NeptuneIamRoleArn: ").append(getNeptuneIamRoleArn()).append(",");
        if (getModelName() != null)
            buf.append("ModelName: ").append(getModelName()).append(",");
        if (getInstanceType() != null)
            buf.append("InstanceType: ").append(getInstanceType()).append(",");
        if (getInstanceCount() != null)
            buf.append("InstanceCount: ").append(getInstanceCount()).append(",");
        if (getVolumeEncryptionKMSKey() != null)
            buf.append("VolumeEncryptionKMSKey: ").append(getVolumeEncryptionKMSKey());
        return buf.append("}").toString();
    }

    /**
     * Null-safe inequality check used by {@link #equals(Object)}: true when exactly one argument is {@code null} or
     * both are non-null and unequal.
     */
    private static boolean differs(Object a, Object b) {
        return (a == null) ? b != null : !a.equals(b);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (!(obj instanceof CreateMLEndpointRequest))
            return false;
        CreateMLEndpointRequest that = (CreateMLEndpointRequest) obj;
        return !differs(getId(), that.getId())
                && !differs(getMlModelTrainingJobId(), that.getMlModelTrainingJobId())
                && !differs(getMlModelTransformJobId(), that.getMlModelTransformJobId())
                && !differs(getUpdate(), that.getUpdate())
                && !differs(getNeptuneIamRoleArn(), that.getNeptuneIamRoleArn())
                && !differs(getModelName(), that.getModelName())
                && !differs(getInstanceType(), that.getInstanceType())
                && !differs(getInstanceCount(), that.getInstanceCount())
                && !differs(getVolumeEncryptionKMSKey(), that.getVolumeEncryptionKMSKey());
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        // Fold members in declaration order; null contributes 0, matching the generated-code contract.
        Object[] members = { getId(), getMlModelTrainingJobId(), getMlModelTransformJobId(), getUpdate(),
                getNeptuneIamRoleArn(), getModelName(), getInstanceType(), getInstanceCount(),
                getVolumeEncryptionKMSKey() };
        for (Object member : members) {
            hashCode = prime * hashCode + ((member == null) ? 0 : member.hashCode());
        }
        return hashCode;
    }

    @Override
    public CreateMLEndpointRequest clone() {
        return (CreateMLEndpointRequest) super.clone();
    }
}




© 2015 - 2025 Weber Informatics LLC | Privacy Policy