
com.amazonaws.services.bedrockruntime.model.InferenceConfiguration


The AWS Java SDK for Amazon Bedrock Runtime module holds the client classes that are used for communicating with the Amazon Bedrock Runtime Service.

/*
 * Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.bedrockruntime.model;

import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * <p>
 * Base inference parameters to pass to a model in a call to Converse or ConverseStream. For more information, see
 * Inference parameters for foundation models.
 * </p>
 * <p>
 * If you need to pass additional parameters that the model supports, use the
 * <code>additionalModelRequestFields</code> request field in the call to Converse or ConverseStream. For more
 * information, see Model parameters.
 * </p>
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class InferenceConfiguration implements Serializable, Cloneable, StructuredPojo {

    /**
     * <p>
     * The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value
     * for the model that you are using. For more information, see Inference parameters for foundation models.
     * </p>
     */
    private Integer maxTokens;
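    // Usage sketch (not part of the generated source): wiring this configuration
    // into a Converse call. ConverseRequest and its withInferenceConfig(...)
    // setter come from this same SDK module; the model ID below is illustrative.
    //
    //     InferenceConfiguration config = new InferenceConfiguration()
    //             .withMaxTokens(512)
    //             .withTemperature(0.5F)
    //             .withTopP(0.9F)
    //             .withStopSequences("###");
    //     ConverseRequest request = new ConverseRequest()
    //             .withModelId("anthropic.claude-3-sonnet-20240229-v1:0")
    //             .withInferenceConfig(config);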

    /**
     * <p>
     * The likelihood of the model selecting higher-probability options while generating a response. A lower value
     * makes the model more likely to choose higher-probability options, while a higher value makes the model more
     * likely to choose lower-probability options.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     */
    private Float temperature;
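    // Illustration (values are arbitrary, and valid ranges are model-specific):
    //     new InferenceConfiguration().withTemperature(0.1F)
    // concentrates sampling on the highest-probability tokens, so repeated calls
    // tend to produce near-identical output, while a value such as 0.9F spreads
    // the probability mass and yields more varied completions.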

    /**
     * <p>
     * The percentage of most-likely candidates that the model considers for the next token. For example, if you
     * choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability distribution
     * of tokens that could be next in the sequence.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     */
    private Float topP;
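    // Worked example of the 0.8 case above (token probabilities are
    // hypothetical): given next-token candidates {A: 0.5, B: 0.3, C: 0.15,
    // D: 0.05}, topP = 0.8F restricts sampling to {A, B}, whose cumulative
    // probability covers the top 80% of the distribution; C and D are excluded.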

    /**
     * <p>
     * A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating
     * the response.
     * </p>
     */
    private java.util.List<String> stopSequences;
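    // Behavior sketch (the strings are hypothetical): with
    //     .withStopSequences("\nUser:")
    // generation halts as soon as the model emits "\nUser:", which keeps a
    // chat-style completion from running on into the next simulated user turn.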

    /**
     * <p>
     * The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value
     * for the model that you are using. For more information, see Inference parameters for foundation models.
     * </p>
     *
     * @param maxTokens
     *        The maximum number of tokens to allow in the generated response. The default value is the maximum
     *        allowed value for the model that you are using. For more information, see Inference parameters for
     *        foundation models.
     */
    public void setMaxTokens(Integer maxTokens) {
        this.maxTokens = maxTokens;
    }

    /**
     * <p>
     * The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value
     * for the model that you are using. For more information, see Inference parameters for foundation models.
     * </p>
     *
     * @return The maximum number of tokens to allow in the generated response. The default value is the maximum
     *         allowed value for the model that you are using. For more information, see Inference parameters for
     *         foundation models.
     */
    public Integer getMaxTokens() {
        return this.maxTokens;
    }

    /**
     * <p>
     * The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value
     * for the model that you are using. For more information, see Inference parameters for foundation models.
     * </p>
     *
     * @param maxTokens
     *        The maximum number of tokens to allow in the generated response. The default value is the maximum
     *        allowed value for the model that you are using. For more information, see Inference parameters for
     *        foundation models.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceConfiguration withMaxTokens(Integer maxTokens) {
        setMaxTokens(maxTokens);
        return this;
    }

    /**
     * <p>
     * The likelihood of the model selecting higher-probability options while generating a response. A lower value
     * makes the model more likely to choose higher-probability options, while a higher value makes the model more
     * likely to choose lower-probability options.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     *
     * @param temperature
     *        The likelihood of the model selecting higher-probability options while generating a response. A lower
     *        value makes the model more likely to choose higher-probability options, while a higher value makes the
     *        model more likely to choose lower-probability options.
     *        <p>
     *        The default value is the default value for the model that you are using. For more information, see
     *        Inference parameters for foundation models.
     */
    public void setTemperature(Float temperature) {
        this.temperature = temperature;
    }

    /**
     * <p>
     * The likelihood of the model selecting higher-probability options while generating a response. A lower value
     * makes the model more likely to choose higher-probability options, while a higher value makes the model more
     * likely to choose lower-probability options.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     *
     * @return The likelihood of the model selecting higher-probability options while generating a response. A lower
     *         value makes the model more likely to choose higher-probability options, while a higher value makes the
     *         model more likely to choose lower-probability options.
     *         <p>
     *         The default value is the default value for the model that you are using. For more information, see
     *         Inference parameters for foundation models.
     */
    public Float getTemperature() {
        return this.temperature;
    }

    /**
     * <p>
     * The likelihood of the model selecting higher-probability options while generating a response. A lower value
     * makes the model more likely to choose higher-probability options, while a higher value makes the model more
     * likely to choose lower-probability options.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     *
     * @param temperature
     *        The likelihood of the model selecting higher-probability options while generating a response. A lower
     *        value makes the model more likely to choose higher-probability options, while a higher value makes the
     *        model more likely to choose lower-probability options.
     *        <p>
     *        The default value is the default value for the model that you are using. For more information, see
     *        Inference parameters for foundation models.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceConfiguration withTemperature(Float temperature) {
        setTemperature(temperature);
        return this;
    }

    /**
     * <p>
     * The percentage of most-likely candidates that the model considers for the next token. For example, if you
     * choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability distribution
     * of tokens that could be next in the sequence.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     *
     * @param topP
     *        The percentage of most-likely candidates that the model considers for the next token. For example, if
     *        you choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability
     *        distribution of tokens that could be next in the sequence.
     *        <p>
     *        The default value is the default value for the model that you are using. For more information, see
     *        Inference parameters for foundation models.
     */
    public void setTopP(Float topP) {
        this.topP = topP;
    }

    /**
     * <p>
     * The percentage of most-likely candidates that the model considers for the next token. For example, if you
     * choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability distribution
     * of tokens that could be next in the sequence.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     *
     * @return The percentage of most-likely candidates that the model considers for the next token. For example, if
     *         you choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability
     *         distribution of tokens that could be next in the sequence.
     *         <p>
     *         The default value is the default value for the model that you are using. For more information, see
     *         Inference parameters for foundation models.
     */
    public Float getTopP() {
        return this.topP;
    }

    /**
     * <p>
     * The percentage of most-likely candidates that the model considers for the next token. For example, if you
     * choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability distribution
     * of tokens that could be next in the sequence.
     * </p>
     * <p>
     * The default value is the default value for the model that you are using. For more information, see Inference
     * parameters for foundation models.
     * </p>
     *
     * @param topP
     *        The percentage of most-likely candidates that the model considers for the next token. For example, if
     *        you choose a value of 0.8 for <code>topP</code>, the model selects from the top 80% of the probability
     *        distribution of tokens that could be next in the sequence.
     *        <p>
     *        The default value is the default value for the model that you are using. For more information, see
     *        Inference parameters for foundation models.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceConfiguration withTopP(Float topP) {
        setTopP(topP);
        return this;
    }

    /**
     * <p>
     * A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating
     * the response.
     * </p>
     *
     * @return A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop
     *         generating the response.
     */
    public java.util.List<String> getStopSequences() {
        return stopSequences;
    }

    /**
     * <p>
     * A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating
     * the response.
     * </p>
     *
     * @param stopSequences
     *        A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop
     *        generating the response.
     */
    public void setStopSequences(java.util.Collection<String> stopSequences) {
        if (stopSequences == null) {
            this.stopSequences = null;
            return;
        }
        this.stopSequences = new java.util.ArrayList<String>(stopSequences);
    }

    /**
     * <p>
     * A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating
     * the response.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setStopSequences(java.util.Collection)} or {@link #withStopSequences(java.util.Collection)} if you want
     * to override the existing values.
     * </p>
     *
     * @param stopSequences
     *        A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop
     *        generating the response.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceConfiguration withStopSequences(String... stopSequences) {
        if (this.stopSequences == null) {
            setStopSequences(new java.util.ArrayList<String>(stopSequences.length));
        }
        for (String ele : stopSequences) {
            this.stopSequences.add(ele);
        }
        return this;
    }
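    // Behavior sketch for the varargs method above: successive calls append
    // rather than replace, so
    //     new InferenceConfiguration().withStopSequences("A").withStopSequences("B")
    // leaves stopSequences == ["A", "B"]. Call setStopSequences(...) or the
    // Collection overload below to replace the list wholesale.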

    /**
     * <p>
     * A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating
     * the response.
     * </p>
     *
     * @param stopSequences
     *        A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop
     *        generating the response.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public InferenceConfiguration withStopSequences(java.util.Collection<String> stopSequences) {
        setStopSequences(stopSequences);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will
     * be redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getMaxTokens() != null)
            sb.append("MaxTokens: ").append(getMaxTokens()).append(",");
        if (getTemperature() != null)
            sb.append("Temperature: ").append(getTemperature()).append(",");
        if (getTopP() != null)
            sb.append("TopP: ").append(getTopP()).append(",");
        if (getStopSequences() != null)
            sb.append("StopSequences: ").append(getStopSequences());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (obj instanceof InferenceConfiguration == false)
            return false;
        InferenceConfiguration other = (InferenceConfiguration) obj;
        if (other.getMaxTokens() == null ^ this.getMaxTokens() == null)
            return false;
        if (other.getMaxTokens() != null && other.getMaxTokens().equals(this.getMaxTokens()) == false)
            return false;
        if (other.getTemperature() == null ^ this.getTemperature() == null)
            return false;
        if (other.getTemperature() != null && other.getTemperature().equals(this.getTemperature()) == false)
            return false;
        if (other.getTopP() == null ^ this.getTopP() == null)
            return false;
        if (other.getTopP() != null && other.getTopP().equals(this.getTopP()) == false)
            return false;
        if (other.getStopSequences() == null ^ this.getStopSequences() == null)
            return false;
        if (other.getStopSequences() != null && other.getStopSequences().equals(this.getStopSequences()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;
        hashCode = prime * hashCode + ((getMaxTokens() == null) ? 0 : getMaxTokens().hashCode());
        hashCode = prime * hashCode + ((getTemperature() == null) ? 0 : getTemperature().hashCode());
        hashCode = prime * hashCode + ((getTopP() == null) ? 0 : getTopP().hashCode());
        hashCode = prime * hashCode + ((getStopSequences() == null) ? 0 : getStopSequences().hashCode());
        return hashCode;
    }

    @Override
    public InferenceConfiguration clone() {
        try {
            return (InferenceConfiguration) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() "
                    + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.bedrockruntime.model.transform.InferenceConfigurationMarshaller.getInstance()
                .marshall(this, protocolMarshaller);
    }
}



