/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.transcribe.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
*
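* Request parameters for the <code>StartTranscriptionJob</code> operation.
*
* A minimal usage sketch, assuming the standard client builder shipped with this SDK; the job name, bucket, and
* media file below are illustrative placeholders, not required values:
*
* <pre>{@code
* AmazonTranscribe transcribe = AmazonTranscribeClientBuilder.defaultClient();
*
* StartTranscriptionJobRequest request = new StartTranscriptionJobRequest()
*         .withTranscriptionJobName("my-first-transcription")   // must be unique within the account
*         .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
*         .withLanguageCode(LanguageCode.EnUS)                  // or enable automatic language identification
*         .withOutputBucketName("DOC-EXAMPLE-BUCKET");          // omit to use a service-managed bucket
*
* StartTranscriptionJobResult result = transcribe.startTranscriptionJob(request);
* }</pre>
*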
* @see AWS API Documentation
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class StartTranscriptionJobRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {
/**
*
* A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default
* name of your transcription output file. If you want to specify a different name for your transcription output,
* use the <code>OutputKey</code> parameter.
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If
* you try to create a new job with the same name as an existing job, you get a <code>ConflictException</code>
* error.
*
*/
private String transcriptionJobName;
/**
*
* The language code that represents the language spoken in the input media file.
*
* If you're unsure of the language spoken in your media file, consider using <code>IdentifyLanguage</code> or
* <code>IdentifyMultipleLanguages</code> to enable automatic language identification.
*
* Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or
* <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your
* transcription job fails.
*
* For a list of supported languages and their associated language codes, refer to the Supported languages table.
*
* To transcribe speech in Modern Standard Arabic (<code>ar-SA</code>), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
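* For example (a sketch; <code>request</code> is an existing <code>StartTranscriptionJobRequest</code> and the
* language code is illustrative):
*
* <pre>{@code
* // Supply exactly one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages.
* request.withLanguageCode(LanguageCode.EnUS);
*
* // If the spoken language is unknown, use automatic identification instead:
* // request.withIdentifyLanguage(true);
* }</pre>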
*/
private String languageCode;
/**
*
* The sample rate, in hertz, of the audio track in your input media file.
*
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample
* rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you
* specify and the value detected, your job fails. In most cases, you can omit <code>MediaSampleRateHertz</code> and
* let Amazon Transcribe determine the sample rate.
*
*/
private Integer mediaSampleRateHertz;
/**
*
* Specify the format of your input media file.
*
*/
private String mediaFormat;
/**
*
* Describes the Amazon S3 location of the media file you want to use in your request.
*
*/
private Media media;
/**
*
* The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* <code>S3://</code> prefix of the specified bucket.
*
* If you want your output to go to a sub-folder of this bucket, specify it using the <code>OutputKey</code>
* parameter; <code>OutputBucketName</code> only accepts the name of a bucket.
*
* For example, if you want your output stored in <code>S3://DOC-EXAMPLE-BUCKET</code>, set
* <code>OutputBucketName</code> to <code>DOC-EXAMPLE-BUCKET</code>. However, if you want your output stored in
* <code>S3://DOC-EXAMPLE-BUCKET/test-files/</code>, set <code>OutputBucketName</code> to
* <code>DOC-EXAMPLE-BUCKET</code> and <code>OutputKey</code> to <code>test-files/</code>.
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
*
* If you do not specify <code>OutputBucketName</code>, your transcript is placed in a service-managed Amazon S3
* bucket and you are provided with a URI to access your transcript.
*
*/
private String outputBucketName;
/**
*
* Use in combination with <code>OutputBucketName</code> to specify the output location of your transcript and,
* optionally, a unique name for your output file. The default name for your transcription output is the same as
* the name you specified for your transcription job (<code>TranscriptionJobName</code>).
*
* Here are some examples of how you can use <code>OutputKey</code>:
*
* - If you specify 'DOC-EXAMPLE-BUCKET' as the <code>OutputBucketName</code> and 'my-transcript.json' as the
* <code>OutputKey</code>, your transcription output path is <code>s3://DOC-EXAMPLE-BUCKET/my-transcript.json</code>.
*
* - If you specify 'my-first-transcription' as the <code>TranscriptionJobName</code>, 'DOC-EXAMPLE-BUCKET' as the
* <code>OutputBucketName</code>, and 'my-transcript' as the <code>OutputKey</code>, your transcription output path
* is <code>s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json</code>.
*
* - If you specify 'DOC-EXAMPLE-BUCKET' as the <code>OutputBucketName</code> and 'test-files/my-transcript.json'
* as the <code>OutputKey</code>, your transcription output path is
* <code>s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json</code>.
*
* - If you specify 'my-first-transcription' as the <code>TranscriptionJobName</code>, 'DOC-EXAMPLE-BUCKET' as the
* <code>OutputBucketName</code>, and 'test-files/my-transcript' as the <code>OutputKey</code>, your transcription
* output path is <code>s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json</code>.
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
*
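* As a sketch, the second example above corresponds to a request like the following (only the output-related
* parameters are shown; the job, bucket, and key names are the placeholder values from that example):
*
* <pre>{@code
* StartTranscriptionJobRequest request = new StartTranscriptionJobRequest()
*         .withTranscriptionJobName("my-first-transcription")
*         .withOutputBucketName("DOC-EXAMPLE-BUCKET")
*         .withOutputKey("my-transcript");
* // Output is written to s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
* }</pre>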
*/
private String outputKey;
/**
*
* The KMS key you want to use to encrypt your transcription output.
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of four
* ways:
*
* - Use the KMS key ID itself. For example, <code>1234abcd-12ab-34cd-56ef-1234567890ab</code>.
*
* - Use an alias for the KMS key ID. For example, <code>alias/ExampleAlias</code>.
*
* - Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* <code>arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>.
*
* - Use the ARN for the KMS key alias. For example, <code>arn:aws:kms:region:account-ID:alias/ExampleAlias</code>.
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web Services account,
* you can specify your KMS key in one of two ways:
*
* - Use the ARN for the KMS key ID. For example,
* <code>arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>.
*
* - Use the ARN for the KMS key alias. For example, <code>arn:aws:kms:region:account-ID:alias/ExampleAlias</code>.
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* <code>OutputLocation</code> parameter.
*
* Note that the role making the request must have permission to use the specified KMS key.
*
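* As a sketch, using the example key ARN from the list above (the region, account ID, and key ID are placeholders;
* the output location is expressed here through <code>OutputBucketName</code>):
*
* <pre>{@code
* request.withOutputEncryptionKMSKeyId("arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab")
*        .withOutputBucketName("DOC-EXAMPLE-BUCKET");  // an output location is required with a KMS key
* }</pre>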
*/
private String outputEncryptionKMSKeyId;
/**
*
* A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer
* of security for your data. For more information, see KMS encryption context and Asymmetric keys in KMS.
*
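* Entries can also be added one at a time with the map-entry helper on this class; for example (the key and value
* are arbitrary, non-secret placeholders):
*
* <pre>{@code
* request.addKMSEncryptionContextEntry("Department", "Finance");
* }</pre>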
*/
private java.util.Map kMSEncryptionContext;
/**
*
* Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, and speaker partitioning. You can use that to apply custom vocabularies and vocabulary filters.
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not
* want to use automatic language identification, use <code>Settings</code> with the <code>VocabularyName</code>
* or <code>VocabularyFilterName</code> (or both) sub-parameter.
*
* If you're using automatic language identification with your request and want to include a custom language
* model, a custom vocabulary, or a custom vocabulary filter, use instead the <code>LanguageIdSettings</code>
* parameter with the <code>LanguageModelName</code>, <code>VocabularyName</code>, or
* <code>VocabularyFilterName</code> sub-parameters.
*
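* A sketch that applies a custom vocabulary and a custom vocabulary filter without automatic language
* identification (the vocabulary and filter names are placeholders and must already exist):
*
* <pre>{@code
* request.withSettings(new Settings()
*         .withVocabularyName("my-custom-vocabulary")
*         .withVocabularyFilterName("my-vocabulary-filter"));
* }</pre>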
*/
private Settings settings;
/**
*
* Specify the custom language model you want to include with your transcription job. If you include
* <code>ModelSettings</code> in your request, you must include the <code>LanguageModelName</code> sub-parameter.
*
*
* For more information, see Custom language models.
*
*/
private ModelSettings modelSettings;
/**
*
* Makes it possible to control how your transcription job is processed. Currently, the only
* <code>JobExecutionSettings</code> modification you can choose is enabling job queueing using the
* <code>AllowDeferredExecution</code> sub-parameter.
*
* If you include <code>JobExecutionSettings</code> in your request, you must also include the sub-parameters:
* <code>AllowDeferredExecution</code> and <code>DataAccessRoleArn</code>.
*
*/
private JobExecutionSettings jobExecutionSettings;
/**
*
* Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If
* you use <code>ContentRedaction</code>, you must also include the sub-parameters: <code>RedactionOutput</code>
* and <code>RedactionType</code>. You can optionally include <code>PiiEntityTypes</code> to choose which types of
* PII you want to redact. If you do not include <code>PiiEntityTypes</code> in your request, all PII is redacted.
*
*/
private ContentRedaction contentRedaction;
/**
*
* Enables automatic language identification in your transcription job request. Use this parameter if your media
* file contains only one language. If your media contains multiple languages, use
* <code>IdentifyMultipleLanguages</code> instead.
*
* If you include <code>IdentifyLanguage</code>, you can optionally include a list of language codes, using
* <code>LanguageOptions</code>, that you think may be present in your media file. Including
* <code>LanguageOptions</code> restricts <code>IdentifyLanguage</code> to only the language options that you
* specify, which can improve transcription accuracy.
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include <code>LanguageIdSettings</code> with the relevant
* sub-parameters (<code>VocabularyName</code>, <code>LanguageModelName</code>, and
* <code>VocabularyFilterName</code>). If you include <code>LanguageIdSettings</code>, also include
* <code>LanguageOptions</code>.
*
* Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or
* <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your
* transcription job fails.
*
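* For example, to enable single-language identification restricted to a few expected languages (a sketch; the
* language codes are illustrative):
*
* <pre>{@code
* request.withIdentifyLanguage(true)
*        .withLanguageOptions("en-US", "es-US", "fr-FR");
* }</pre>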
*/
private Boolean identifyLanguage;
/**
*
* Enables automatic multi-language identification in your transcription job request. Use this parameter if your
* media file contains more than one language. If your media contains only one language, use
* <code>IdentifyLanguage</code> instead.
*
* If you include <code>IdentifyMultipleLanguages</code>, you can optionally include a list of language codes,
* using <code>LanguageOptions</code>, that you think may be present in your media file. Including
* <code>LanguageOptions</code> restricts <code>IdentifyLanguage</code> to only the language options that you
* specify, which can improve transcription accuracy.
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification
* request, include <code>LanguageIdSettings</code> with the relevant sub-parameters (<code>VocabularyName</code>
* and <code>VocabularyFilterName</code>). If you include <code>LanguageIdSettings</code>, also include
* <code>LanguageOptions</code>.
*
* Note that you must include one of <code>LanguageCode</code>, <code>IdentifyLanguage</code>, or
* <code>IdentifyMultipleLanguages</code> in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*/
private Boolean identifyMultipleLanguages;
/**
*
* You can specify two or more language codes that represent the languages you think may be present in your media.
* Including more than five is not recommended. If you're unsure what languages are present, do not include this
* parameter.
*
* If you include <code>LanguageOptions</code> in your request, you must also include
* <code>IdentifyLanguage</code>.
*
* For more information, refer to Supported languages.
*
* To transcribe speech in Modern Standard Arabic (<code>ar-SA</code>), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*/
private java.util.List languageOptions;
/**
*
* Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
*
*/
private Subtitles subtitles;
/**
*
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you
* start this new job.
*
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*
*/
private java.util.List tags;
/**
*
* If using automatic language identification in your request and you want to apply a custom language model, a
* custom vocabulary, or a custom vocabulary filter, include <code>LanguageIdSettings</code> with the relevant
* sub-parameters (<code>VocabularyName</code>, <code>LanguageModelName</code>, and
* <code>VocabularyFilterName</code>). Note that multi-language identification
* (<code>IdentifyMultipleLanguages</code>) doesn't support custom language models.
*
* <code>LanguageIdSettings</code> supports two to five language codes. Each language code you include can have an
* associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you
* specify must match the languages of the associated custom language models, custom vocabularies, and custom
* vocabulary filters.
*
* It's recommended that you include <code>LanguageOptions</code> when using <code>LanguageIdSettings</code> to
* ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is
* in <code>en-US</code> but Amazon Transcribe determines that the language spoken in your media is
* <code>en-AU</code>, your custom vocabulary is not applied to your transcription. If you include
* <code>LanguageOptions</code> and include <code>en-US</code> as the only English language dialect, your custom
* vocabulary is applied to your transcription.
*
* If you want to include a custom language model with your request but do not want to use automatic language
* identification, use instead the <code>ModelSettings</code> parameter with the <code>LanguageModelName</code>
* sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your
* request but do not want to use automatic language identification, use instead the <code>Settings</code>
* parameter with the <code>VocabularyName</code> or <code>VocabularyFilterName</code> (or both) sub-parameter.
*
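* A sketch pairing automatic language identification with per-language resources (the vocabulary and model names
* are placeholders; <code>addLanguageIdSettingsEntry</code> is the map-entry helper on this class):
*
* <pre>{@code
* request.withIdentifyLanguage(true)
*        .withLanguageOptions("en-US", "en-AU")
*        .addLanguageIdSettingsEntry("en-US", new LanguageIdSettings()
*                .withVocabularyName("my-en-US-vocabulary")
*                .withLanguageModelName("my-en-US-model"));
* }</pre>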
*/
private java.util.Map languageIdSettings;
/**
*
* Enables toxic speech detection in your transcript. If you include <code>ToxicityDetection</code> in your
* request, you must also include <code>ToxicityCategories</code>.
*
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*
*/
private java.util.List toxicityDetection;
/**
*
* A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default
* name of your transcription output file. If you want to specify a different name for your transcription output,
* use the OutputKey
parameter.
*
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If
* you try to create a new job with the same name as an existing job, you get a ConflictException
* error.
*
*
* @param transcriptionJobName
* A unique name, chosen by you, for your transcription job. The name that you specify is also used as the
* default name of your transcription output file. If you want to specify a different name for your
* transcription output, use the OutputKey
parameter.
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services
* account. If you try to create a new job with the same name as an existing job, you get a
* ConflictException
error.
*/
public void setTranscriptionJobName(String transcriptionJobName) {
this.transcriptionJobName = transcriptionJobName;
}
/**
*
* A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default
* name of your transcription output file. If you want to specify a different name for your transcription output,
* use the OutputKey
parameter.
*
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If
* you try to create a new job with the same name as an existing job, you get a ConflictException
* error.
*
*
* @return A unique name, chosen by you, for your transcription job. The name that you specify is also used as the
* default name of your transcription output file. If you want to specify a different name for your
* transcription output, use the OutputKey
parameter.
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services
* account. If you try to create a new job with the same name as an existing job, you get a
* ConflictException
error.
*/
public String getTranscriptionJobName() {
return this.transcriptionJobName;
}
/**
*
* A unique name, chosen by you, for your transcription job. The name that you specify is also used as the default
* name of your transcription output file. If you want to specify a different name for your transcription output,
* use the OutputKey
parameter.
*
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account. If
* you try to create a new job with the same name as an existing job, you get a ConflictException
* error.
*
*
* @param transcriptionJobName
* A unique name, chosen by you, for your transcription job. The name that you specify is also used as the
* default name of your transcription output file. If you want to specify a different name for your
* transcription output, use the OutputKey
parameter.
*
* This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services
* account. If you try to create a new job with the same name as an existing job, you get a
* ConflictException
error.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withTranscriptionJobName(String transcriptionJobName) {
setTranscriptionJobName(transcriptionJobName);
return this;
}
/**
*
* The language code that represents the language spoken in the input media file.
*
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
or
* IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
*
* @param languageCode
* The language code that represents the language spoken in the input media file.
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
* or IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages
* table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
*
* @see LanguageCode
*/
public void setLanguageCode(String languageCode) {
this.languageCode = languageCode;
}
/**
*
* The language code that represents the language spoken in the input media file.
*
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
or
* IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
*
* @return The language code that represents the language spoken in the input media file.
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
* or IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages
* table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
*
* @see LanguageCode
*/
public String getLanguageCode() {
return this.languageCode;
}
/**
*
* The language code that represents the language spoken in the input media file.
*
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
or
* IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
*
* @param languageCode
* The language code that represents the language spoken in the input media file.
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
* or IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages
* table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see LanguageCode
*/
public StartTranscriptionJobRequest withLanguageCode(String languageCode) {
setLanguageCode(languageCode);
return this;
}
/**
*
* The language code that represents the language spoken in the input media file.
*
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
or
* IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
*
* @param languageCode
* The language code that represents the language spoken in the input media file.
*
* If you're unsure of the language spoken in your media file, consider using IdentifyLanguage
* or IdentifyMultipleLanguages
to enable automatic language identification.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*
*
* For a list of supported languages and their associated language codes, refer to the Supported languages
* table.
*
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
*
* @return Returns a reference to this object so that method calls can be chained together.
* @see LanguageCode
*/
public StartTranscriptionJobRequest withLanguageCode(LanguageCode languageCode) {
this.languageCode = languageCode.toString();
return this;
}
/**
*
* The sample rate, in hertz, of the audio track in your input media file.
*
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample
* rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you
* specify and the value detected, your job fails. In most cases, you can omit MediaSampleRateHertz
and
* let Amazon Transcribe determine the sample rate.
*
*
* @param mediaSampleRateHertz
* The sample rate, in hertz, of the audio track in your input media file.
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the
* sample rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value
* that you specify and the value detected, your job fails. In most cases, you can omit
* MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
*/
public void setMediaSampleRateHertz(Integer mediaSampleRateHertz) {
this.mediaSampleRateHertz = mediaSampleRateHertz;
}
/**
*
* The sample rate, in hertz, of the audio track in your input media file.
*
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample
* rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you
* specify and the value detected, your job fails. In most cases, you can omit MediaSampleRateHertz
and
* let Amazon Transcribe determine the sample rate.
*
*
* @return The sample rate, in hertz, of the audio track in your input media file.
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the
* sample rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the
* value that you specify and the value detected, your job fails. In most cases, you can omit
* MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
*/
public Integer getMediaSampleRateHertz() {
return this.mediaSampleRateHertz;
}
/**
*
* The sample rate, in hertz, of the audio track in your input media file.
*
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the sample
* rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value that you
* specify and the value detected, your job fails. In most cases, you can omit MediaSampleRateHertz
and
* let Amazon Transcribe determine the sample rate.
*
*
* @param mediaSampleRateHertz
* The sample rate, in hertz, of the audio track in your input media file.
*
* If you do not specify the media sample rate, Amazon Transcribe determines it for you. If you specify the
* sample rate, it must match the rate detected by Amazon Transcribe. If there's a mismatch between the value
* that you specify and the value detected, your job fails. In most cases, you can omit
* MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withMediaSampleRateHertz(Integer mediaSampleRateHertz) {
setMediaSampleRateHertz(mediaSampleRateHertz);
return this;
}
/**
*
* Specify the format of your input media file.
*
*
* @param mediaFormat
* Specify the format of your input media file.
* @see MediaFormat
*/
public void setMediaFormat(String mediaFormat) {
this.mediaFormat = mediaFormat;
}
/**
*
* Specify the format of your input media file.
*
*
* @return Specify the format of your input media file.
* @see MediaFormat
*/
public String getMediaFormat() {
return this.mediaFormat;
}
/**
*
* Specify the format of your input media file.
*
*
* @param mediaFormat
* Specify the format of your input media file.
* @return Returns a reference to this object so that method calls can be chained together.
* @see MediaFormat
*/
public StartTranscriptionJobRequest withMediaFormat(String mediaFormat) {
setMediaFormat(mediaFormat);
return this;
}
/**
*
* Specify the format of your input media file.
*
*
* @param mediaFormat
* Specify the format of your input media file.
* @return Returns a reference to this object so that method calls can be chained together.
* @see MediaFormat
*/
public StartTranscriptionJobRequest withMediaFormat(MediaFormat mediaFormat) {
this.mediaFormat = mediaFormat.toString();
return this;
}
/**
*
* Describes the Amazon S3 location of the media file you want to use in your request.
*
*
* @param media
* Describes the Amazon S3 location of the media file you want to use in your request.
*/
public void setMedia(Media media) {
this.media = media;
}
/**
*
* Describes the Amazon S3 location of the media file you want to use in your request.
*
*
* @return Describes the Amazon S3 location of the media file you want to use in your request.
*/
public Media getMedia() {
return this.media;
}
/**
*
* Describes the Amazon S3 location of the media file you want to use in your request.
*
*
* @param media
* Describes the Amazon S3 location of the media file you want to use in your request.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withMedia(Media media) {
setMedia(media);
return this;
}
/**
*
* The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* S3://
prefix of the specified bucket.
*
*
* If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
* parameter; OutputBucketName
only accepts the name of a bucket.
*
*
* For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set
* OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored in
* S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to
* DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
*
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management Console. See
* also Permissions Required for IAM User Roles.
*
*
* If you do not specify OutputBucketName
, your transcript is placed in a service-managed Amazon S3
* bucket and you are provided with a URI to access your transcript.
*
*
* @param outputBucketName
* The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* S3://
prefix of the specified bucket.
*
* If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
* parameter; OutputBucketName
only accepts the name of a bucket.
*
*
* For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set
* OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored
* in S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to
* DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
*
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management
* Console. See also Permissions Required for IAM User Roles.
*
*
* If you do not specify OutputBucketName
, your transcript is placed in a service-managed Amazon
* S3 bucket and you are provided with a URI to access your transcript.
*/
public void setOutputBucketName(String outputBucketName) {
this.outputBucketName = outputBucketName;
}
/**
*
* The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* S3://
prefix of the specified bucket.
*
*
* If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
* parameter; OutputBucketName
only accepts the name of a bucket.
*
*
* For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set
* OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored in
* S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to
* DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
*
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management Console. See
* also Permissions Required for IAM User Roles.
*
*
* If you do not specify OutputBucketName
, your transcript is placed in a service-managed Amazon S3
* bucket and you are provided with a URI to access your transcript.
*
*
* @return The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* S3://
prefix of the specified bucket.
*
* If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
* parameter; OutputBucketName
only accepts the name of a bucket.
*
*
* For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set
* OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored
* in S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to
* DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
*
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management
* Console. See also Permissions Required for IAM User Roles.
*
*
* If you do not specify OutputBucketName
, your transcript is placed in a service-managed
* Amazon S3 bucket and you are provided with a URI to access your transcript.
*/
public String getOutputBucketName() {
return this.outputBucketName;
}
/**
*
* The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* S3://
prefix of the specified bucket.
*
*
* If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
* parameter; OutputBucketName
only accepts the name of a bucket.
*
*
* For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set
* OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored in
* S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to
* DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
*
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management Console. See
* also Permissions Required for IAM User Roles.
*
*
* If you do not specify OutputBucketName
, your transcript is placed in a service-managed Amazon S3
* bucket and you are provided with a URI to access your transcript.
*
*
* @param outputBucketName
* The name of the Amazon S3 bucket where you want your transcription output stored. Do not include the
* S3://
prefix of the specified bucket.
*
* If you want your output to go to a sub-folder of this bucket, specify it using the OutputKey
* parameter; OutputBucketName
only accepts the name of a bucket.
*
*
* For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
, set
* OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you want your output stored
* in S3://DOC-EXAMPLE-BUCKET/test-files/
, set OutputBucketName
to
* DOC-EXAMPLE-BUCKET
and OutputKey
to test-files/
.
*
*
* Note that Amazon Transcribe must have permission to use the specified location. You can change Amazon S3
* permissions using the Amazon Web Services Management
* Console. See also Permissions Required for IAM User Roles.
*
*
* If you do not specify OutputBucketName
, your transcript is placed in a service-managed Amazon
* S3 bucket and you are provided with a URI to access your transcript.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withOutputBucketName(String outputBucketName) {
setOutputBucketName(outputBucketName);
return this;
}
/**
*
* Use in combination with OutputBucketName
to specify the output location of your transcript and,
* optionally, a unique name for your output file. The default name for your transcription output is the same as the
* name you specified for your transcription job (TranscriptionJobName
).
*
*
* Here are some examples of how you can use OutputKey
:
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'my-transcript.json' as the
* OutputKey
, your transcription output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
* .
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the
* OutputBucketName
, and 'my-transcript' as the OutputKey
, your transcription output path
* is s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'test-files/my-transcript.json' as
* the OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the
* OutputBucketName
, and 'test-files/my-transcript' as the OutputKey
, your transcription
* output path is s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
*
*
*
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
*
*
* @param outputKey
* Use in combination with OutputBucketName
to specify the output location of your transcript
* and, optionally, a unique name for your output file. The default name for your transcription output is the
* same as the name you specified for your transcription job (TranscriptionJobName
).
*
* Here are some examples of how you can use OutputKey
:
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'my-transcript.json' as the
* OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as
* the OutputBucketName
, and 'my-transcript' as the OutputKey
, your transcription
* output path is s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and
* 'test-files/my-transcript.json' as the OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as
* the OutputBucketName
, and 'test-files/my-transcript' as the OutputKey
, your
* transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
*
*
*
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
*/
public void setOutputKey(String outputKey) {
this.outputKey = outputKey;
}
/**
*
* Use in combination with OutputBucketName
to specify the output location of your transcript and,
* optionally, a unique name for your output file. The default name for your transcription output is the same as the
* name you specified for your transcription job (TranscriptionJobName
).
*
*
* Here are some examples of how you can use OutputKey
:
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'my-transcript.json' as the
* OutputKey
, your transcription output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
* .
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the
* OutputBucketName
, and 'my-transcript' as the OutputKey
, your transcription output path
* is s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'test-files/my-transcript.json' as
* the OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the
* OutputBucketName
, and 'test-files/my-transcript' as the OutputKey
, your transcription
* output path is s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
*
*
*
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
*
*
* @return Use in combination with OutputBucketName
to specify the output location of your transcript
* and, optionally, a unique name for your output file. The default name for your transcription output is
* the same as the name you specified for your transcription job (TranscriptionJobName
).
*
* Here are some examples of how you can use OutputKey
:
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'my-transcript.json' as the
* OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as
* the OutputBucketName
, and 'my-transcript' as the OutputKey
, your transcription
* output path is s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and
* 'test-files/my-transcript.json' as the OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as
* the OutputBucketName
, and 'test-files/my-transcript' as the OutputKey
, your
* transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
*
*
*
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
*/
public String getOutputKey() {
return this.outputKey;
}
/**
*
* Use in combination with OutputBucketName
to specify the output location of your transcript and,
* optionally, a unique name for your output file. The default name for your transcription output is the same as the
* name you specified for your transcription job (TranscriptionJobName
).
*
*
* Here are some examples of how you can use OutputKey
:
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'my-transcript.json' as the
* OutputKey
, your transcription output path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
* .
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the
* OutputBucketName
, and 'my-transcript' as the OutputKey
, your transcription output path
* is s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'test-files/my-transcript.json' as
* the OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the
* OutputBucketName
, and 'test-files/my-transcript' as the OutputKey
, your transcription
* output path is s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
*
*
*
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
*
*
* @param outputKey
* Use in combination with OutputBucketName
to specify the output location of your transcript
* and, optionally, a unique name for your output file. The default name for your transcription output is the
* same as the name you specified for your transcription job (TranscriptionJobName
).
*
* Here are some examples of how you can use OutputKey
:
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and 'my-transcript.json' as the
* OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as
* the OutputBucketName
, and 'my-transcript' as the OutputKey
, your transcription
* output path is s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
*
*
* -
*
* If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and
* 'test-files/my-transcript.json' as the OutputKey
, your transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
*
*
* -
*
* If you specify 'my-first-transcription' as the TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as
* the OutputBucketName
, and 'test-files/my-transcript' as the OutputKey
, your
* transcription output path is
* s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
*
*
*
*
* If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist, one is created for you.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withOutputKey(String outputKey) {
setOutputKey(outputKey);
return this;
}
/**
*
* The KMS key you want to use to encrypt your transcription output.
*
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of
* four ways:
*
*
* -
*
* Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use an alias for the KMS key ID. For example, alias/ExampleAlias
.
*
*
* -
*
* Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web Services
* account, you can specify your KMS key in one of two ways:
*
*
* -
*
* Use the ARN for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
*
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* OutputLocation
parameter.
*
*
* Note that the role making the request must have permission to use the specified KMS key.
*
*
* @param outputEncryptionKMSKeyId
* The KMS key you want to use to encrypt your transcription output.
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in
* one of four ways:
*
*
* -
*
* Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use an alias for the KMS key ID. For example, alias/ExampleAlias
.
*
*
* -
*
* Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example,
* arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web
* Services account, you can specify your KMS key in one of two ways:
*
*
* -
*
* Use the ARN for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example,
* arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
*
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* OutputLocation
parameter.
*
*
* Note that the role making the request must have permission to use the specified KMS key.
*/
public void setOutputEncryptionKMSKeyId(String outputEncryptionKMSKeyId) {
this.outputEncryptionKMSKeyId = outputEncryptionKMSKeyId;
}
/**
*
* The KMS key you want to use to encrypt your transcription output.
*
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of
* four ways:
*
*
* -
*
* Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use an alias for the KMS key ID. For example, alias/ExampleAlias
.
*
*
* -
*
* Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web Services
* account, you can specify your KMS key in one of two ways:
*
*
* -
*
* Use the ARN for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
*
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* OutputLocation
parameter.
*
*
* Note that the role making the request must have permission to use the specified KMS key.
*
*
* @return The KMS key you want to use to encrypt your transcription output.
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in
* one of four ways:
*
*
* -
*
* Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use an alias for the KMS key ID. For example, alias/ExampleAlias
.
*
*
* -
*
* Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example,
* arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web
* Services account, you can specify your KMS key in one of two ways:
*
*
* -
*
* Use the ARN for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example,
* arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key
* (SSE-S3).
*
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* OutputLocation
parameter.
*
*
* Note that the role making the request must have permission to use the specified KMS key.
*/
public String getOutputEncryptionKMSKeyId() {
return this.outputEncryptionKMSKeyId;
}
/**
*
* The KMS key you want to use to encrypt your transcription output.
*
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of
* four ways:
*
*
* -
*
* Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use an alias for the KMS key ID. For example, alias/ExampleAlias
.
*
*
* -
*
* Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web Services
* account, you can specify your KMS key in one of two ways:
*
*
* -
*
* Use the ARN for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
*
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* OutputLocation
parameter.
*
*
* Note that the role making the request must have permission to use the specified KMS key.
*
*
* @param outputEncryptionKMSKeyId
* The KMS key you want to use to encrypt your transcription output.
*
* If using a key located in the current Amazon Web Services account, you can specify your KMS key in
* one of four ways:
*
*
* -
*
* Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use an alias for the KMS key ID. For example, alias/ExampleAlias
.
*
*
* -
*
* Use the Amazon Resource Name (ARN) for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example,
* arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If using a key located in a different Amazon Web Services account than the current Amazon Web
* Services account, you can specify your KMS key in one of two ways:
*
*
* -
*
* Use the ARN for the KMS key ID. For example,
* arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
*
*
* -
*
* Use the ARN for the KMS key alias. For example,
* arn:aws:kms:region:account-ID:alias/ExampleAlias
.
*
*
*
*
* If you do not specify an encryption key, your output is encrypted with the default Amazon S3 key (SSE-S3).
*
*
* If you specify a KMS key to encrypt your output, you must also specify an output location using the
* OutputLocation
parameter.
*
*
* Note that the role making the request must have permission to use the specified KMS key.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withOutputEncryptionKMSKeyId(String outputEncryptionKMSKeyId) {
setOutputEncryptionKMSKeyId(outputEncryptionKMSKeyId);
return this;
}
/**
*
* A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer
* of security for your data. For more information, see KMS encryption
* context and Asymmetric
* keys in KMS.
*
*
* @return A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added
* layer of security for your data. For more information, see KMS encryption
* context and Asymmetric keys in
* KMS.
*/
public java.util.Map getKMSEncryptionContext() {
return kMSEncryptionContext;
}
/**
*
* A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer
* of security for your data. For more information, see KMS encryption
* context and Asymmetric
* keys in KMS.
*
*
* @param kMSEncryptionContext
* A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added
* layer of security for your data. For more information, see KMS encryption
* context and Asymmetric keys in
* KMS.
*/
public void setKMSEncryptionContext(java.util.Map kMSEncryptionContext) {
this.kMSEncryptionContext = kMSEncryptionContext;
}
/**
*
* A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer
* of security for your data. For more information, see KMS encryption
* context and Asymmetric
* keys in KMS.
*
*
* @param kMSEncryptionContext
* A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added
* layer of security for your data. For more information, see KMS encryption
* context and Asymmetric keys in
* KMS.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withKMSEncryptionContext(java.util.Map kMSEncryptionContext) {
setKMSEncryptionContext(kMSEncryptionContext);
return this;
}
/**
* Add a single KMSEncryptionContext entry
*
* @see StartTranscriptionJobRequest#withKMSEncryptionContext
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest addKMSEncryptionContextEntry(String key, String value) {
if (null == this.kMSEncryptionContext) {
this.kMSEncryptionContext = new java.util.HashMap();
}
if (this.kMSEncryptionContext.containsKey(key))
throw new IllegalArgumentException("Duplicated keys (" + key.toString() + ") are provided.");
this.kMSEncryptionContext.put(key, value);
return this;
}
/**
* Removes all the entries added into KMSEncryptionContext.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest clearKMSEncryptionContextEntries() {
this.kMSEncryptionContext = null;
return this;
}
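// Illustrative usage sketch, not part of the generated model: attaching non-secret encryption context
// pairs to the KMS-encrypted output, using the single-entry helper above. The key:value pairs and key
// alias are arbitrary placeholders.
private static StartTranscriptionJobRequest exampleRequestWithEncryptionContext() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withOutputBucketName("DOC-EXAMPLE-BUCKET")
            .withOutputEncryptionKMSKeyId("arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias")
            .addKMSEncryptionContextEntry("Department", "Finance")
            .addKMSEncryptionContextEntry("Project", "CallCenterTranscripts");
}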
/**
*
* Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary filters.
*
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do
* not want to use automatic language identification, use Settings with the
* VocabularyName or VocabularyFilterName (or both) sub-parameter.
*
*
* If you're using automatic language identification with your request and want to include a custom language model,
* a custom vocabulary, or a custom vocabulary filter, use instead the LanguageIdSettings parameter
* with the LanguageModelName, VocabularyName or VocabularyFilterName sub-parameters.
*
*
* @param settings
* Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary
* filters.
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but
* do not want to use automatic language identification, use Settings
with the
* VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
*
*
* If you're using automatic language identification with your request and want to include a custom language
* model, a custom vocabulary, or a custom vocabulary filter, use instead the
*
parameter with the LanguageModelName
, VocabularyName
or
* VocabularyFilterName
sub-parameters.
*/
public void setSettings(Settings settings) {
this.settings = settings;
}
/**
*
* Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary filters.
*
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do
* not want to use automatic language identification, use Settings with the
* VocabularyName or VocabularyFilterName (or both) sub-parameter.
*
*
* If you're using automatic language identification with your request and want to include a custom language model,
* a custom vocabulary, or a custom vocabulary filter, use instead the LanguageIdSettings parameter
* with the LanguageModelName, VocabularyName or VocabularyFilterName sub-parameters.
*
*
* @return Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary
* filters.
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but
* do not want to use automatic language identification, use Settings
with the
* VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
*
*
* If you're using automatic language identification with your request and want to include a custom language
* model, a custom vocabulary, or a custom vocabulary filter, use instead the
*
parameter with the LanguageModelName
, VocabularyName
or
* VocabularyFilterName
sub-parameters.
*/
public Settings getSettings() {
return this.settings;
}
/**
*
* Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary filters.
*
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do
* not want to use automatic language identification, use Settings with the
* VocabularyName or VocabularyFilterName (or both) sub-parameter.
*
*
* If you're using automatic language identification with your request and want to include a custom language model,
* a custom vocabulary, or a custom vocabulary filter, use instead the LanguageIdSettings parameter
* with the LanguageModelName, VocabularyName or VocabularyFilterName sub-parameters.
*
*
* @param settings
* Specify additional optional settings in your request, including channel identification, alternative
* transcriptions, speaker partitioning. You can use that to apply custom vocabularies and vocabulary
* filters.
*
* If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but
* do not want to use automatic language identification, use Settings
with the
* VocabularyName
or VocabularyFilterName
(or both) sub-parameter.
*
*
* If you're using automatic language identification with your request and want to include a custom language
* model, a custom vocabulary, or a custom vocabulary filter, use instead the
*
parameter with the LanguageModelName
, VocabularyName
or
* VocabularyFilterName
sub-parameters.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withSettings(Settings settings) {
setSettings(settings);
return this;
}
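// Illustrative usage sketch, not part of the generated model: using Settings to apply a custom
// vocabulary and a masked vocabulary filter together with speaker partitioning, without automatic
// language identification. The vocabulary and filter names are placeholders that must already exist
// in your account.
private static StartTranscriptionJobRequest exampleRequestWithSettings() {
    Settings settings = new Settings()
            .withVocabularyName("my-custom-vocabulary")
            .withVocabularyFilterName("my-vocabulary-filter")
            .withVocabularyFilterMethod("mask")
            .withShowSpeakerLabels(true)
            .withMaxSpeakerLabels(2);
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withLanguageCode("en-US")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withSettings(settings);
}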
/**
*
* Specify the custom language model you want to include with your transcription job. If you include
* ModelSettings in your request, you must include the LanguageModelName sub-parameter.
*
*
* For more information, see Custom language models.
*
*
* @param modelSettings
* Specify the custom language model you want to include with your transcription job. If you include
* ModelSettings
in your request, you must include the LanguageModelName
* sub-parameter.
*
* For more information, see Custom language
* models.
*/
public void setModelSettings(ModelSettings modelSettings) {
this.modelSettings = modelSettings;
}
/**
*
* Specify the custom language model you want to include with your transcription job. If you include
* ModelSettings in your request, you must include the LanguageModelName sub-parameter.
*
*
* For more information, see Custom language models.
*
*
* @return Specify the custom language model you want to include with your transcription job. If you include
* ModelSettings
in your request, you must include the LanguageModelName
* sub-parameter.
*
* For more information, see Custom language
* models.
*/
public ModelSettings getModelSettings() {
return this.modelSettings;
}
/**
*
* Specify the custom language model you want to include with your transcription job. If you include
* ModelSettings in your request, you must include the LanguageModelName sub-parameter.
*
*
* For more information, see Custom language models.
*
*
* @param modelSettings
* Specify the custom language model you want to include with your transcription job. If you include
* ModelSettings
in your request, you must include the LanguageModelName
* sub-parameter.
*
* For more information, see Custom language
* models.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withModelSettings(ModelSettings modelSettings) {
setModelSettings(modelSettings);
return this;
}
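// Illustrative usage sketch, not part of the generated model: attaching a custom language model by
// name. "my-custom-language-model" is a placeholder for a model that already exists in your account.
private static StartTranscriptionJobRequest exampleRequestWithModelSettings() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withLanguageCode("en-US")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withModelSettings(new ModelSettings().withLanguageModelName("my-custom-language-model"));
}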
/**
*
* Makes it possible to control how your transcription job is processed. Currently, the only
* JobExecutionSettings modification you can choose is enabling job queueing using the
* AllowDeferredExecution sub-parameter.
*
*
* If you include JobExecutionSettings in your request, you must also include the sub-parameters:
* AllowDeferredExecution and DataAccessRoleArn.
*
*
* @param jobExecutionSettings
* Makes it possible to control how your transcription job is processed. Currently, the only
* JobExecutionSettings
modification you can choose is enabling job queueing using the
* AllowDeferredExecution
sub-parameter.
*
* If you include JobExecutionSettings
in your request, you must also include the
* sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
*/
public void setJobExecutionSettings(JobExecutionSettings jobExecutionSettings) {
this.jobExecutionSettings = jobExecutionSettings;
}
/**
*
* Makes it possible to control how your transcription job is processed. Currently, the only
* JobExecutionSettings modification you can choose is enabling job queueing using the
* AllowDeferredExecution sub-parameter.
*
*
* If you include JobExecutionSettings in your request, you must also include the sub-parameters:
* AllowDeferredExecution and DataAccessRoleArn.
*
*
* @return Makes it possible to control how your transcription job is processed. Currently, the only
* JobExecutionSettings
modification you can choose is enabling job queueing using the
* AllowDeferredExecution
sub-parameter.
*
* If you include JobExecutionSettings
in your request, you must also include the
* sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
*/
public JobExecutionSettings getJobExecutionSettings() {
return this.jobExecutionSettings;
}
/**
*
* Makes it possible to control how your transcription job is processed. Currently, the only
* JobExecutionSettings modification you can choose is enabling job queueing using the
* AllowDeferredExecution sub-parameter.
*
*
* If you include JobExecutionSettings in your request, you must also include the sub-parameters:
* AllowDeferredExecution and DataAccessRoleArn.
*
*
* @param jobExecutionSettings
* Makes it possible to control how your transcription job is processed. Currently, the only
* JobExecutionSettings
modification you can choose is enabling job queueing using the
* AllowDeferredExecution
sub-parameter.
*
* If you include JobExecutionSettings
in your request, you must also include the
* sub-parameters: AllowDeferredExecution
and DataAccessRoleArn
.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withJobExecutionSettings(JobExecutionSettings jobExecutionSettings) {
setJobExecutionSettings(jobExecutionSettings);
return this;
}
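// Illustrative usage sketch, not part of the generated model: enabling job queueing. As documented
// above, AllowDeferredExecution requires DataAccessRoleArn; the role ARN shown is a placeholder.
private static StartTranscriptionJobRequest exampleRequestWithJobQueueing() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withLanguageCode("en-US")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withJobExecutionSettings(new JobExecutionSettings()
                    .withAllowDeferredExecution(true)
                    .withDataAccessRoleArn("arn:aws:iam::111122223333:role/ExampleTranscribeRole"));
}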
/**
*
* Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If
* you use ContentRedaction, you must also include the sub-parameters: RedactionOutput and
* RedactionType. You can optionally include PiiEntityTypes to choose which types of PII
* you want to redact. If you do not include PiiEntityTypes in your request, all PII is redacted.
*
*
* @param contentRedaction
* Makes it possible to redact or flag specified personally identifiable information (PII) in your
* transcript. If you use ContentRedaction
, you must also include the sub-parameters:
* RedactionOutput
and RedactionType
. You can optionally include
* PiiEntityTypes
to choose which types of PII you want to redact. If you do not include
* PiiEntityTypes
in your request, all PII is redacted.
*/
public void setContentRedaction(ContentRedaction contentRedaction) {
this.contentRedaction = contentRedaction;
}
/**
*
* Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If
* you use ContentRedaction, you must also include the sub-parameters: RedactionOutput and
* RedactionType. You can optionally include PiiEntityTypes to choose which types of PII
* you want to redact. If you do not include PiiEntityTypes in your request, all PII is redacted.
*
*
* @return Makes it possible to redact or flag specified personally identifiable information (PII) in your
* transcript. If you use ContentRedaction
, you must also include the sub-parameters:
* RedactionOutput
and RedactionType
. You can optionally include
* PiiEntityTypes
to choose which types of PII you want to redact. If you do not include
* PiiEntityTypes
in your request, all PII is redacted.
*/
public ContentRedaction getContentRedaction() {
return this.contentRedaction;
}
/**
*
* Makes it possible to redact or flag specified personally identifiable information (PII) in your transcript. If
* you use ContentRedaction, you must also include the sub-parameters: RedactionOutput and
* RedactionType. You can optionally include PiiEntityTypes to choose which types of PII
* you want to redact. If you do not include PiiEntityTypes in your request, all PII is redacted.
*
*
* @param contentRedaction
* Makes it possible to redact or flag specified personally identifiable information (PII) in your
* transcript. If you use ContentRedaction
, you must also include the sub-parameters:
* RedactionOutput
and RedactionType
. You can optionally include
* PiiEntityTypes
to choose which types of PII you want to redact. If you do not include
* PiiEntityTypes
in your request, all PII is redacted.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withContentRedaction(ContentRedaction contentRedaction) {
setContentRedaction(contentRedaction);
return this;
}
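// Illustrative usage sketch, not part of the generated model: redacting all detected PII while keeping
// both redacted and unredacted transcripts. The enum values are passed as their string forms; omitting
// PiiEntityTypes would also redact all PII, as described above.
private static StartTranscriptionJobRequest exampleRequestWithPiiRedaction() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withLanguageCode("en-US")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withContentRedaction(new ContentRedaction()
                    .withRedactionType("PII")
                    .withRedactionOutput("redacted_and_unredacted")
                    .withPiiEntityTypes("ALL"));
}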
/**
*
* Enables automatic language identification in your transcription job request. Use this parameter if your media
* file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages instead.
*
*
* If you include IdentifyLanguage, you can optionally include a list of language codes, using
* LanguageOptions, that you think may be present in your media file. Including
* LanguageOptions restricts IdentifyLanguage to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings with the relevant
* sub-parameters (VocabularyName, LanguageModelName, and
* VocabularyFilterName). If you include LanguageIdSettings, also include
* LanguageOptions.
*
*
* Note that you must include one of LanguageCode, IdentifyLanguage, or
* IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @param identifyLanguage
* Enables automatic language identification in your transcription job request. Use this parameter if your
* media file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*/
public void setIdentifyLanguage(Boolean identifyLanguage) {
this.identifyLanguage = identifyLanguage;
}
/**
*
* Enables automatic language identification in your transcription job request. Use this parameter if your media
* file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @return Enables automatic language identification in your transcription job request. Use this parameter if your
* media file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that
* you specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*/
public Boolean getIdentifyLanguage() {
return this.identifyLanguage;
}
/**
*
* Enables automatic language identification in your transcription job request. Use this parameter if your media
* file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @param identifyLanguage
* Enables automatic language identification in your transcription job request. Use this parameter if your
* media file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withIdentifyLanguage(Boolean identifyLanguage) {
setIdentifyLanguage(identifyLanguage);
return this;
}
/**
*
* Enables automatic language identification in your transcription job request. Use this parameter if your media
* file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @return Enables automatic language identification in your transcription job request. Use this parameter if your
* media file contains only one language. If your media contains multiple languages, use
* IdentifyMultipleLanguages
instead.
*
* If you include IdentifyLanguage
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that
* you specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom language model, a custom vocabulary, or a custom vocabulary filter to your
* automatic language identification request, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*/
public Boolean isIdentifyLanguage() {
return this.identifyLanguage;
}
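// Illustrative usage sketch, not part of the generated model: enabling single-language identification
// and narrowing the candidates with LanguageOptions, per the guidance above. The language codes are
// example values.
private static StartTranscriptionJobRequest exampleRequestWithIdentifyLanguage() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withIdentifyLanguage(true)
            .withLanguageOptions("en-US", "es-US", "fr-CA");
}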
/**
*
* Enables automatic multi-language identification in your transcription job request. Use this parameter if your
* media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage instead.
*
*
* If you include IdentifyMultipleLanguages, you can optionally include a list of language codes, using
* LanguageOptions, that you think may be present in your media file. Including
* LanguageOptions restricts IdentifyLanguage to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification
* request, include LanguageIdSettings with the relevant sub-parameters (VocabularyName
* and VocabularyFilterName). If you include LanguageIdSettings, also include
* LanguageOptions.
*
*
* Note that you must include one of LanguageCode, IdentifyLanguage, or
* IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @param identifyMultipleLanguages
* Enables automatic multi-language identification in your transcription job request. Use this parameter if
* your media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language
* codes, using LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language
* identification request, include LanguageIdSettings
with the relevant sub-parameters (
* VocabularyName
and VocabularyFilterName
). If you include
* LanguageIdSettings
, also include LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*/
public void setIdentifyMultipleLanguages(Boolean identifyMultipleLanguages) {
this.identifyMultipleLanguages = identifyMultipleLanguages;
}
/**
*
* Enables automatic multi-language identification in your transcription job request. Use this parameter if your
* media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification
* request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
* and VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @return Enables automatic multi-language identification in your transcription job request. Use this parameter if
* your media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language
* codes, using LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that
* you specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language
* identification request, include LanguageIdSettings
with the relevant sub-parameters (
* VocabularyName
and VocabularyFilterName
). If you include
* LanguageIdSettings
, also include LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*/
public Boolean getIdentifyMultipleLanguages() {
return this.identifyMultipleLanguages;
}
/**
*
* Enables automatic multi-language identification in your transcription job request. Use this parameter if your
* media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification
* request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
* and VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @param identifyMultipleLanguages
* Enables automatic multi-language identification in your transcription job request. Use this parameter if
* your media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language
* codes, using LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language
* identification request, include LanguageIdSettings
with the relevant sub-parameters (
* VocabularyName
and VocabularyFilterName
). If you include
* LanguageIdSettings
, also include LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withIdentifyMultipleLanguages(Boolean identifyMultipleLanguages) {
setIdentifyMultipleLanguages(identifyMultipleLanguages);
return this;
}
/**
*
* Enables automatic multi-language identification in your transcription job request. Use this parameter if your
* media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language codes, using
* LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that you
* specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language identification
* request, include LanguageIdSettings
with the relevant sub-parameters (VocabularyName
* and VocabularyFilterName
). If you include LanguageIdSettings
, also include
* LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters, your
* transcription job fails.
*
*
* @return Enables automatic multi-language identification in your transcription job request. Use this parameter if
* your media file contains more than one language. If your media contains only one language, use
* IdentifyLanguage
instead.
*
* If you include IdentifyMultipleLanguages
, you can optionally include a list of language
* codes, using LanguageOptions
, that you think may be present in your media file. Including
* LanguageOptions
restricts IdentifyLanguage
to only the language options that
* you specify, which can improve transcription accuracy.
*
*
* If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic language
* identification request, include LanguageIdSettings
with the relevant sub-parameters (
* VocabularyName
and VocabularyFilterName
). If you include
* LanguageIdSettings
, also include LanguageOptions
.
*
*
* Note that you must include one of LanguageCode
, IdentifyLanguage
, or
* IdentifyMultipleLanguages
in your request. If you include more than one of these parameters,
* your transcription job fails.
*/
public Boolean isIdentifyMultipleLanguages() {
return this.identifyMultipleLanguages;
}
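// Illustrative usage sketch, not part of the generated model: enabling multi-language identification
// for media that switches between languages. The candidate language codes are example values.
private static StartTranscriptionJobRequest exampleRequestWithIdentifyMultipleLanguages() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withIdentifyMultipleLanguages(true)
            .withLanguageOptions("en-US", "es-US");
}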
/**
*
* You can specify two or more language codes that represent the languages you think may be present in your media.
* Including more than five is not recommended. If you're unsure what languages are present, do not include this
* parameter.
*
*
* If you include LanguageOptions in your request, you must also include IdentifyLanguage.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
* @return You can specify two or more language codes that represent the languages you think may be present in your
* media. Including more than five is not recommended. If you're unsure what languages are present, do not
* include this parameter.
*
* If you include LanguageOptions
in your request, you must also include
* IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
* @see LanguageCode
*/
public java.util.List<String> getLanguageOptions() {
return languageOptions;
}
/**
*
* You can specify two or more language codes that represent the languages you think may be present in your media.
* Including more than five is not recommended. If you're unsure what languages are present, do not include this
* parameter.
*
*
* If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
* @param languageOptions
* You can specify two or more language codes that represent the languages you think may be present in your
* media. Including more than five is not recommended. If you're unsure what languages are present, do not
* include this parameter.
*
* If you include LanguageOptions
in your request, you must also include
* IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
* @see LanguageCode
*/
public void setLanguageOptions(java.util.Collection<String> languageOptions) {
if (languageOptions == null) {
this.languageOptions = null;
return;
}
this.languageOptions = new java.util.ArrayList<String>(languageOptions);
}
/**
*
* You can specify two or more language codes that represent the languages you think may be present in your media.
* Including more than five is not recommended. If you're unsure what languages are present, do not include this
* parameter.
*
*
* If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setLanguageOptions(java.util.Collection)} or {@link #withLanguageOptions(java.util.Collection)} if you
* want to override the existing values.
*
*
* @param languageOptions
* You can specify two or more language codes that represent the languages you think may be present in your
* media. Including more than five is not recommended. If you're unsure what languages are present, do not
* include this parameter.
*
* If you include LanguageOptions
in your request, you must also include
* IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LanguageCode
*/
public StartTranscriptionJobRequest withLanguageOptions(String... languageOptions) {
if (this.languageOptions == null) {
setLanguageOptions(new java.util.ArrayList<String>(languageOptions.length));
}
for (String ele : languageOptions) {
this.languageOptions.add(ele);
}
return this;
}
/**
*
* You can specify two or more language codes that represent the languages you think may be present in your media.
* Including more than five is not recommended. If you're unsure what languages are present, do not include this
* parameter.
*
*
* If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
* @param languageOptions
* You can specify two or more language codes that represent the languages you think may be present in your
* media. Including more than five is not recommended. If you're unsure what languages are present, do not
* include this parameter.
*
* If you include LanguageOptions
in your request, you must also include
* IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LanguageCode
*/
public StartTranscriptionJobRequest withLanguageOptions(java.util.Collection<String> languageOptions) {
setLanguageOptions(languageOptions);
return this;
}
/**
*
* You can specify two or more language codes that represent the languages you think may be present in your media.
* Including more than five is not recommended. If you're unsure what languages are present, do not include this
* parameter.
*
*
* If you include LanguageOptions
in your request, you must also include IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a sample
* rate of 16,000 Hz or higher.
*
*
* @param languageOptions
* You can specify two or more language codes that represent the languages you think may be present in your
* media. Including more than five is not recommended. If you're unsure what languages are present, do not
* include this parameter.
*
* If you include LanguageOptions
in your request, you must also include
* IdentifyLanguage
.
*
*
* For more information, refer to Supported languages.
*
*
* To transcribe speech in Modern Standard Arabic (ar-SA
), your media file must be encoded at a
* sample rate of 16,000 Hz or higher.
* @return Returns a reference to this object so that method calls can be chained together.
* @see LanguageCode
*/
public StartTranscriptionJobRequest withLanguageOptions(LanguageCode... languageOptions) {
java.util.ArrayList<String> languageOptionsCopy = new java.util.ArrayList<String>(languageOptions.length);
for (LanguageCode value : languageOptions) {
languageOptionsCopy.add(value.toString());
}
if (getLanguageOptions() == null) {
setLanguageOptions(languageOptionsCopy);
} else {
getLanguageOptions().addAll(languageOptionsCopy);
}
return this;
}
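// Illustrative sketch, not part of the generated model: as the NOTE above explains, the varargs
// withLanguageOptions overloads append to any existing list, while setLanguageOptions (or the
// Collection-based wither) replaces it. The language codes are example values.
private static StartTranscriptionJobRequest exampleLanguageOptionsAppendVersusReplace() {
    StartTranscriptionJobRequest request = new StartTranscriptionJobRequest()
            .withLanguageOptions("en-US", "en-AU");
    // Appends: the list now contains en-US, en-AU, fr-CA.
    request.withLanguageOptions("fr-CA");
    // Replaces: the list now contains only de-DE.
    request.setLanguageOptions(java.util.Arrays.asList("de-DE"));
    return request;
}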
/**
*
* Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
*
*
* @param subtitles
* Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
*/
public void setSubtitles(Subtitles subtitles) {
this.subtitles = subtitles;
}
/**
*
* Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
*
*
* @return Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
*/
public Subtitles getSubtitles() {
return this.subtitles;
}
/**
*
* Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
*
*
* @param subtitles
* Produces subtitle files for your input media. You can specify WebVTT (*.vtt) and SubRip (*.srt) formats.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withSubtitles(Subtitles subtitles) {
setSubtitles(subtitles);
return this;
}
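// Illustrative usage sketch, not part of the generated model: requesting both WebVTT and SubRip
// subtitle files alongside the transcript. The formats are passed as their string values and the
// output start index of 1 is an example choice.
private static StartTranscriptionJobRequest exampleRequestWithSubtitles() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withLanguageCode("en-US")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withSubtitles(new Subtitles()
                    .withFormats("vtt", "srt")
                    .withOutputStartIndex(1));
}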
/**
*
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you
* start this new job.
*
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*
*
* @return Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the
* time you start this new job.
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*/
public java.util.List<Tag> getTags() {
return tags;
}
/**
*
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you
* start this new job.
*
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*
*
* @param tags
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time
* you start this new job.
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*/
public void setTags(java.util.Collection<Tag> tags) {
if (tags == null) {
this.tags = null;
return;
}
this.tags = new java.util.ArrayList<Tag>(tags);
}
/**
*
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you
* start this new job.
*
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setTags(java.util.Collection)} or {@link #withTags(java.util.Collection)} if you want to override the
* existing values.
*
*
* @param tags
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time
* you start this new job.
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withTags(Tag... tags) {
if (this.tags == null) {
setTags(new java.util.ArrayList<Tag>(tags.length));
}
for (Tag ele : tags) {
this.tags.add(ele);
}
return this;
}
/**
*
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time you
* start this new job.
*
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
*
*
* @param tags
* Adds one or more custom tags, each in the form of a key:value pair, to a new transcription job at the time
* you start this new job.
*
* To learn more about using tags with Amazon Transcribe, refer to Tagging resources.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withTags(java.util.Collection<Tag> tags) {
setTags(tags);
return this;
}
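// Illustrative usage sketch, not part of the generated model: tagging a new transcription job at
// creation time. The tag keys and values are arbitrary placeholders.
private static StartTranscriptionJobRequest exampleRequestWithTags() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withLanguageCode("en-US")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withTags(new Tag().withKey("Department").withValue("Sales"),
                    new Tag().withKey("CostCenter").withValue("12345"));
}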
/**
*
* If using automatic language identification in your request and you want to apply a custom language model, a
* custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings with the relevant
* sub-parameters (VocabularyName, LanguageModelName, and VocabularyFilterName).
* Note that multi-language identification (IdentifyMultipleLanguages) doesn't support custom language models.
*
*
* LanguageIdSettings supports two to five language codes. Each language code you include can have an
* associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you
* specify must match the languages of the associated custom language models, custom vocabularies, and custom
* vocabulary filters.
*
*
* It's recommended that you include LanguageOptions when using LanguageIdSettings to
* ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is
* in en-US but Amazon Transcribe determines that the language spoken in your media is
* en-AU, your custom vocabulary is not applied to your transcription. If you include
* LanguageOptions and include en-US as the only English language dialect, your custom
* vocabulary is applied to your transcription.
*
*
* If you want to include a custom language model with your request but do not want to use automatic language
* identification, use instead the ModelSettings parameter with the LanguageModelName
* sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with your
* request but do not want to use automatic language identification, use instead the Settings
* parameter with the VocabularyName or VocabularyFilterName (or both) sub-parameter.
*
*
* @return If using automatic language identification in your request and you want to apply a custom language model,
* a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the
* relevant sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). Note that multi-language identification (
* IdentifyMultipleLanguages
) doesn't support custom language models.
*
* LanguageIdSettings
supports two to five language codes. Each language code you include can
* have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language
* codes that you specify must match the languages of the associated custom language models, custom
* vocabularies, and custom vocabulary filters.
*
*
* It's recommended that you include LanguageOptions
when using LanguageIdSettings
* to ensure that the correct language dialect is identified. For example, if you specify a custom
* vocabulary that is in en-US
but Amazon Transcribe determines that the language spoken in
* your media is en-AU
, your custom vocabulary is not applied to your transcription. If
* you include LanguageOptions
and include en-US
as the only English language
* dialect, your custom vocabulary is applied to your transcription.
*
*
* If you want to include a custom language model with your request but do not want to use automatic
* language identification, use instead the
parameter with the LanguageModelName
* sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with
* your request but do not want to use automatic language identification, use instead the
*
parameter with the VocabularyName
or VocabularyFilterName
(or both)
* sub-parameter.
*/
public java.util.Map<String, LanguageIdSettings> getLanguageIdSettings() {
return languageIdSettings;
}
/**
*
* If using automatic language identification in your request and you want to apply a custom language model, a
* custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). Note that multi-language identification (
* IdentifyMultipleLanguages
) doesn't support custom language models.
*
*
* LanguageIdSettings
supports two to five language codes. Each language code you include can have an
* associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you
* specify must match the languages of the associated custom language models, custom vocabularies, and custom
* vocabulary filters.
*
*
* It's recommended that you include LanguageOptions
when using LanguageIdSettings
to
* ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is
* in en-US
but Amazon Transcribe determines that the language spoken in your media is
* en-AU
, your custom vocabulary is not applied to your transcription. If you include
* LanguageOptions
and include en-US
as the only English language dialect, your custom
* vocabulary is applied to your transcription.
*
*
* If you want to include a custom language model with your request but do not want to use automatic language
* identification, use instead the
parameter with the LanguageModelName
sub-parameter. If you
* want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not
* want to use automatic language identification, use instead the
*
parameter with the VocabularyName
or VocabularyFilterName
(or both)
* sub-parameter.
*
*
* @param languageIdSettings
* If using automatic language identification in your request and you want to apply a custom language model,
* a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the
* relevant sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). Note that multi-language identification (
* IdentifyMultipleLanguages
) doesn't support custom language models.
*
* LanguageIdSettings
supports two to five language codes. Each language code you include can
* have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language
* codes that you specify must match the languages of the associated custom language models, custom
* vocabularies, and custom vocabulary filters.
*
*
* It's recommended that you include LanguageOptions
when using LanguageIdSettings
* to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary
* that is in en-US
but Amazon Transcribe determines that the language spoken in your media is
* en-AU
, your custom vocabulary is not applied to your transcription. If you include
* LanguageOptions
and include en-US
as the only English language dialect, your
* custom vocabulary is applied to your transcription.
*
*
* If you want to include a custom language model with your request but do not want to use automatic
* language identification, use instead the
parameter with the LanguageModelName
* sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with
* your request but do not want to use automatic language identification, use instead the
*
parameter with the VocabularyName
or VocabularyFilterName
(or both)
* sub-parameter.
*/
public void setLanguageIdSettings(java.util.Map<String, LanguageIdSettings> languageIdSettings) {
this.languageIdSettings = languageIdSettings;
}
/**
*
* If using automatic language identification in your request and you want to apply a custom language model, a
* custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the relevant
* sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). Note that multi-language identification (
* IdentifyMultipleLanguages
) doesn't support custom language models.
*
*
* LanguageIdSettings
supports two to five language codes. Each language code you include can have an
* associated custom language model, custom vocabulary, and custom vocabulary filter. The language codes that you
* specify must match the languages of the associated custom language models, custom vocabularies, and custom
* vocabulary filters.
*
*
* It's recommended that you include LanguageOptions
when using LanguageIdSettings
to
* ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary that is
* in en-US
but Amazon Transcribe determines that the language spoken in your media is
* en-AU
, your custom vocabulary is not applied to your transcription. If you include
* LanguageOptions
and include en-US
as the only English language dialect, your custom
* vocabulary is applied to your transcription.
*
*
* If you want to include a custom language model with your request but do not want to use automatic language
* identification, use instead the
parameter with the LanguageModelName
sub-parameter. If you
* want to include a custom vocabulary or a custom vocabulary filter (or both) with your request but do not
* want to use automatic language identification, use instead the
*
parameter with the VocabularyName
or VocabularyFilterName
(or both)
* sub-parameter.
*
*
* @param languageIdSettings
* If using automatic language identification in your request and you want to apply a custom language model,
* a custom vocabulary, or a custom vocabulary filter, include LanguageIdSettings
with the
* relevant sub-parameters (VocabularyName
, LanguageModelName
, and
* VocabularyFilterName
). Note that multi-language identification (
* IdentifyMultipleLanguages
) doesn't support custom language models.
*
* LanguageIdSettings
supports two to five language codes. Each language code you include can
* have an associated custom language model, custom vocabulary, and custom vocabulary filter. The language
* codes that you specify must match the languages of the associated custom language models, custom
* vocabularies, and custom vocabulary filters.
*
*
* It's recommended that you include LanguageOptions
when using LanguageIdSettings
* to ensure that the correct language dialect is identified. For example, if you specify a custom vocabulary
* that is in en-US
but Amazon Transcribe determines that the language spoken in your media is
* en-AU
, your custom vocabulary is not applied to your transcription. If you include
* LanguageOptions
and include en-US
as the only English language dialect, your
* custom vocabulary is applied to your transcription.
*
*
* If you want to include a custom language model with your request but do not want to use automatic
* language identification, use instead the
parameter with the LanguageModelName
* sub-parameter. If you want to include a custom vocabulary or a custom vocabulary filter (or both) with
* your request but do not want to use automatic language identification, use instead the
*
parameter with the VocabularyName
or VocabularyFilterName
(or both)
* sub-parameter.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withLanguageIdSettings(java.util.Map<String, LanguageIdSettings> languageIdSettings) {
setLanguageIdSettings(languageIdSettings);
return this;
}
/**
* Add a single LanguageIdSettings entry
*
* @see StartTranscriptionJobRequest#withLanguageIdSettings
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest addLanguageIdSettingsEntry(String key, LanguageIdSettings value) {
if (null == this.languageIdSettings) {
this.languageIdSettings = new java.util.HashMap<String, LanguageIdSettings>();
}
if (this.languageIdSettings.containsKey(key))
throw new IllegalArgumentException("Duplicated keys (" + key.toString() + ") are provided.");
this.languageIdSettings.put(key, value);
return this;
}
/**
* Removes all the entries added into LanguageIdSettings.
*
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest clearLanguageIdSettingsEntries() {
this.languageIdSettings = null;
return this;
}
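// Illustrative usage sketch, not part of the generated model: combining automatic language
// identification with per-language custom vocabularies via the single-entry helper above.
// LanguageOptions is included so the expected dialects are matched, as recommended above; the
// vocabulary names are placeholders.
private static StartTranscriptionJobRequest exampleRequestWithLanguageIdSettings() {
    return new StartTranscriptionJobRequest()
            .withTranscriptionJobName("my-transcription-job")
            .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
            .withIdentifyLanguage(true)
            .withLanguageOptions("en-US", "es-US")
            .addLanguageIdSettingsEntry("en-US", new LanguageIdSettings().withVocabularyName("my-en-US-vocabulary"))
            .addLanguageIdSettingsEntry("es-US", new LanguageIdSettings().withVocabularyName("my-es-US-vocabulary"));
}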
/**
*
* Enables toxic speech detection in your transcript. If you include ToxicityDetection in your request,
* you must also include ToxicityCategories.
*
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*
*
* @return Enables toxic speech detection in your transcript. If you include ToxicityDetection
in your
* request, you must also include ToxicityCategories
.
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*/
public java.util.List<ToxicityDetectionSettings> getToxicityDetection() {
return toxicityDetection;
}
/**
*
* Enables toxic speech detection in your transcript. If you include ToxicityDetection in your request,
* you must also include ToxicityCategories.
*
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*
*
* @param toxicityDetection
* Enables toxic speech detection in your transcript. If you include ToxicityDetection
in your
* request, you must also include ToxicityCategories
.
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*/
public void setToxicityDetection(java.util.Collection<ToxicityDetectionSettings> toxicityDetection) {
if (toxicityDetection == null) {
this.toxicityDetection = null;
return;
}
this.toxicityDetection = new java.util.ArrayList<ToxicityDetectionSettings>(toxicityDetection);
}
/**
*
* Enables toxic speech detection in your transcript. If you include ToxicityDetection in your request,
* you must also include ToxicityCategories.
*
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*
*
* NOTE: This method appends the values to the existing list (if any). Use
* {@link #setToxicityDetection(java.util.Collection)} or {@link #withToxicityDetection(java.util.Collection)} if
* you want to override the existing values.
*
*
* @param toxicityDetection
* Enables toxic speech detection in your transcript. If you include ToxicityDetection
in your
* request, you must also include ToxicityCategories
.
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withToxicityDetection(ToxicityDetectionSettings... toxicityDetection) {
if (this.toxicityDetection == null) {
setToxicityDetection(new java.util.ArrayList<ToxicityDetectionSettings>(toxicityDetection.length));
}
for (ToxicityDetectionSettings ele : toxicityDetection) {
this.toxicityDetection.add(ele);
}
return this;
}
/**
*
* Enables toxic speech detection in your transcript. If you include ToxicityDetection in your request,
* you must also include ToxicityCategories.
*
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
*
*
* @param toxicityDetection
*        Enables toxic speech detection in your transcript. If you include ToxicityDetection in your
*        request, you must also include ToxicityCategories.
*
* For information on the types of toxic speech Amazon Transcribe can detect, see Detecting toxic speech.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public StartTranscriptionJobRequest withToxicityDetection(java.util.Collection<ToxicityDetectionSettings> toxicityDetection) {
setToxicityDetection(toxicityDetection);
return this;
}
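/*
 * Usage sketch (illustrative only, not part of the generated model): enabling toxic speech detection.
 * ToxicityDetection is a list holding a single ToxicityDetectionSettings element, and that element
 * must name the toxicity categories to flag; "ALL" is assumed here as the category value, and the job
 * name and bucket are hypothetical placeholders.
 *
 * StartTranscriptionJobRequest request = new StartTranscriptionJobRequest()
 *         .withTranscriptionJobName("my-transcription-job")
 *         .withLanguageCode("en-US")
 *         .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
 *         .withToxicityDetection(new ToxicityDetectionSettings().withToxicityCategories("ALL"));
 *
 * Note that withToxicityDetection(ToxicityDetectionSettings...) appends to any existing list, while
 * setToxicityDetection(Collection) and withToxicityDetection(Collection) replace it.
 */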
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getTranscriptionJobName() != null)
sb.append("TranscriptionJobName: ").append(getTranscriptionJobName()).append(",");
if (getLanguageCode() != null)
sb.append("LanguageCode: ").append(getLanguageCode()).append(",");
if (getMediaSampleRateHertz() != null)
sb.append("MediaSampleRateHertz: ").append(getMediaSampleRateHertz()).append(",");
if (getMediaFormat() != null)
sb.append("MediaFormat: ").append(getMediaFormat()).append(",");
if (getMedia() != null)
sb.append("Media: ").append(getMedia()).append(",");
if (getOutputBucketName() != null)
sb.append("OutputBucketName: ").append(getOutputBucketName()).append(",");
if (getOutputKey() != null)
sb.append("OutputKey: ").append(getOutputKey()).append(",");
if (getOutputEncryptionKMSKeyId() != null)
sb.append("OutputEncryptionKMSKeyId: ").append(getOutputEncryptionKMSKeyId()).append(",");
if (getKMSEncryptionContext() != null)
sb.append("KMSEncryptionContext: ").append(getKMSEncryptionContext()).append(",");
if (getSettings() != null)
sb.append("Settings: ").append(getSettings()).append(",");
if (getModelSettings() != null)
sb.append("ModelSettings: ").append(getModelSettings()).append(",");
if (getJobExecutionSettings() != null)
sb.append("JobExecutionSettings: ").append(getJobExecutionSettings()).append(",");
if (getContentRedaction() != null)
sb.append("ContentRedaction: ").append(getContentRedaction()).append(",");
if (getIdentifyLanguage() != null)
sb.append("IdentifyLanguage: ").append(getIdentifyLanguage()).append(",");
if (getIdentifyMultipleLanguages() != null)
sb.append("IdentifyMultipleLanguages: ").append(getIdentifyMultipleLanguages()).append(",");
if (getLanguageOptions() != null)
sb.append("LanguageOptions: ").append(getLanguageOptions()).append(",");
if (getSubtitles() != null)
sb.append("Subtitles: ").append(getSubtitles()).append(",");
if (getTags() != null)
sb.append("Tags: ").append(getTags()).append(",");
if (getLanguageIdSettings() != null)
sb.append("LanguageIdSettings: ").append(getLanguageIdSettings()).append(",");
if (getToxicityDetection() != null)
sb.append("ToxicityDetection: ").append(getToxicityDetection());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof StartTranscriptionJobRequest == false)
return false;
StartTranscriptionJobRequest other = (StartTranscriptionJobRequest) obj;
if (other.getTranscriptionJobName() == null ^ this.getTranscriptionJobName() == null)
return false;
if (other.getTranscriptionJobName() != null && other.getTranscriptionJobName().equals(this.getTranscriptionJobName()) == false)
return false;
if (other.getLanguageCode() == null ^ this.getLanguageCode() == null)
return false;
if (other.getLanguageCode() != null && other.getLanguageCode().equals(this.getLanguageCode()) == false)
return false;
if (other.getMediaSampleRateHertz() == null ^ this.getMediaSampleRateHertz() == null)
return false;
if (other.getMediaSampleRateHertz() != null && other.getMediaSampleRateHertz().equals(this.getMediaSampleRateHertz()) == false)
return false;
if (other.getMediaFormat() == null ^ this.getMediaFormat() == null)
return false;
if (other.getMediaFormat() != null && other.getMediaFormat().equals(this.getMediaFormat()) == false)
return false;
if (other.getMedia() == null ^ this.getMedia() == null)
return false;
if (other.getMedia() != null && other.getMedia().equals(this.getMedia()) == false)
return false;
if (other.getOutputBucketName() == null ^ this.getOutputBucketName() == null)
return false;
if (other.getOutputBucketName() != null && other.getOutputBucketName().equals(this.getOutputBucketName()) == false)
return false;
if (other.getOutputKey() == null ^ this.getOutputKey() == null)
return false;
if (other.getOutputKey() != null && other.getOutputKey().equals(this.getOutputKey()) == false)
return false;
if (other.getOutputEncryptionKMSKeyId() == null ^ this.getOutputEncryptionKMSKeyId() == null)
return false;
if (other.getOutputEncryptionKMSKeyId() != null && other.getOutputEncryptionKMSKeyId().equals(this.getOutputEncryptionKMSKeyId()) == false)
return false;
if (other.getKMSEncryptionContext() == null ^ this.getKMSEncryptionContext() == null)
return false;
if (other.getKMSEncryptionContext() != null && other.getKMSEncryptionContext().equals(this.getKMSEncryptionContext()) == false)
return false;
if (other.getSettings() == null ^ this.getSettings() == null)
return false;
if (other.getSettings() != null && other.getSettings().equals(this.getSettings()) == false)
return false;
if (other.getModelSettings() == null ^ this.getModelSettings() == null)
return false;
if (other.getModelSettings() != null && other.getModelSettings().equals(this.getModelSettings()) == false)
return false;
if (other.getJobExecutionSettings() == null ^ this.getJobExecutionSettings() == null)
return false;
if (other.getJobExecutionSettings() != null && other.getJobExecutionSettings().equals(this.getJobExecutionSettings()) == false)
return false;
if (other.getContentRedaction() == null ^ this.getContentRedaction() == null)
return false;
if (other.getContentRedaction() != null && other.getContentRedaction().equals(this.getContentRedaction()) == false)
return false;
if (other.getIdentifyLanguage() == null ^ this.getIdentifyLanguage() == null)
return false;
if (other.getIdentifyLanguage() != null && other.getIdentifyLanguage().equals(this.getIdentifyLanguage()) == false)
return false;
if (other.getIdentifyMultipleLanguages() == null ^ this.getIdentifyMultipleLanguages() == null)
return false;
if (other.getIdentifyMultipleLanguages() != null && other.getIdentifyMultipleLanguages().equals(this.getIdentifyMultipleLanguages()) == false)
return false;
if (other.getLanguageOptions() == null ^ this.getLanguageOptions() == null)
return false;
if (other.getLanguageOptions() != null && other.getLanguageOptions().equals(this.getLanguageOptions()) == false)
return false;
if (other.getSubtitles() == null ^ this.getSubtitles() == null)
return false;
if (other.getSubtitles() != null && other.getSubtitles().equals(this.getSubtitles()) == false)
return false;
if (other.getTags() == null ^ this.getTags() == null)
return false;
if (other.getTags() != null && other.getTags().equals(this.getTags()) == false)
return false;
if (other.getLanguageIdSettings() == null ^ this.getLanguageIdSettings() == null)
return false;
if (other.getLanguageIdSettings() != null && other.getLanguageIdSettings().equals(this.getLanguageIdSettings()) == false)
return false;
if (other.getToxicityDetection() == null ^ this.getToxicityDetection() == null)
return false;
if (other.getToxicityDetection() != null && other.getToxicityDetection().equals(this.getToxicityDetection()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getTranscriptionJobName() == null) ? 0 : getTranscriptionJobName().hashCode());
hashCode = prime * hashCode + ((getLanguageCode() == null) ? 0 : getLanguageCode().hashCode());
hashCode = prime * hashCode + ((getMediaSampleRateHertz() == null) ? 0 : getMediaSampleRateHertz().hashCode());
hashCode = prime * hashCode + ((getMediaFormat() == null) ? 0 : getMediaFormat().hashCode());
hashCode = prime * hashCode + ((getMedia() == null) ? 0 : getMedia().hashCode());
hashCode = prime * hashCode + ((getOutputBucketName() == null) ? 0 : getOutputBucketName().hashCode());
hashCode = prime * hashCode + ((getOutputKey() == null) ? 0 : getOutputKey().hashCode());
hashCode = prime * hashCode + ((getOutputEncryptionKMSKeyId() == null) ? 0 : getOutputEncryptionKMSKeyId().hashCode());
hashCode = prime * hashCode + ((getKMSEncryptionContext() == null) ? 0 : getKMSEncryptionContext().hashCode());
hashCode = prime * hashCode + ((getSettings() == null) ? 0 : getSettings().hashCode());
hashCode = prime * hashCode + ((getModelSettings() == null) ? 0 : getModelSettings().hashCode());
hashCode = prime * hashCode + ((getJobExecutionSettings() == null) ? 0 : getJobExecutionSettings().hashCode());
hashCode = prime * hashCode + ((getContentRedaction() == null) ? 0 : getContentRedaction().hashCode());
hashCode = prime * hashCode + ((getIdentifyLanguage() == null) ? 0 : getIdentifyLanguage().hashCode());
hashCode = prime * hashCode + ((getIdentifyMultipleLanguages() == null) ? 0 : getIdentifyMultipleLanguages().hashCode());
hashCode = prime * hashCode + ((getLanguageOptions() == null) ? 0 : getLanguageOptions().hashCode());
hashCode = prime * hashCode + ((getSubtitles() == null) ? 0 : getSubtitles().hashCode());
hashCode = prime * hashCode + ((getTags() == null) ? 0 : getTags().hashCode());
hashCode = prime * hashCode + ((getLanguageIdSettings() == null) ? 0 : getLanguageIdSettings().hashCode());
hashCode = prime * hashCode + ((getToxicityDetection() == null) ? 0 : getToxicityDetection().hashCode());
return hashCode;
}
@Override
public StartTranscriptionJobRequest clone() {
return (StartTranscriptionJobRequest) super.clone();
}
}
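/*
 * End-to-end sketch (illustrative only, not part of this generated model): submitting the request
 * through the service client in this SDK module. The AmazonTranscribeClientBuilder.defaultClient()
 * and startTranscriptionJob calls are assumed from the client interface of this package; the job
 * name and bucket are hypothetical placeholders.
 *
 * AmazonTranscribe transcribe = AmazonTranscribeClientBuilder.defaultClient();
 * StartTranscriptionJobRequest request = new StartTranscriptionJobRequest()
 *         .withTranscriptionJobName("my-transcription-job")
 *         .withMedia(new Media().withMediaFileUri("s3://DOC-EXAMPLE-BUCKET/my-media-file.flac"))
 *         .withIdentifyLanguage(true)
 *         .withOutputBucketName("DOC-EXAMPLE-BUCKET");
 * StartTranscriptionJobResult result = transcribe.startTranscriptionJob(request);
 */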