/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/videointelligence/v1/video_intelligence.proto

package com.google.cloud.videointelligence.v1;

public interface SpeechTranscriptionConfigOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.videointelligence.v1.SpeechTranscriptionConfig)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * <pre>
   * Required. The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language Support](https://cloud.google.com/speech/docs/languages)
   * for a list of the currently supported language codes.
   * </pre>
   *
   * <code>string language_code = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The languageCode.
   */
  java.lang.String getLanguageCode();

  /**
   *
   *
   * <pre>
   * Required. The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language Support](https://cloud.google.com/speech/docs/languages)
   * for a list of the currently supported language codes.
   * </pre>
   *
   * <code>string language_code = 1 [(.google.api.field_behavior) = REQUIRED];</code>
   *
   * @return The bytes for languageCode.
   */
  com.google.protobuf.ByteString getLanguageCodeBytes();

  /**
   *
   *
   * <pre>
   * Optional. Maximum number of recognition hypotheses to be returned.
   * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   * within each `SpeechTranscription`. The server may return fewer than
   * `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
   * return a maximum of one. If omitted, will return a maximum of one.
   * </pre>
   *
   * <code>int32 max_alternatives = 2 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The maxAlternatives.
   */
  int getMaxAlternatives();

  /**
   *
   *
   * <pre>
   * Optional. If set to `true`, the server will attempt to filter out
   * profanities, replacing all but the initial character in each filtered word
   * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * </pre>
   *
   * <code>bool filter_profanity = 3 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The filterProfanity.
   */
  boolean getFilterProfanity();

  /**
   *
   *
   * <pre>
   * Optional. A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>
   * repeated .google.cloud.videointelligence.v1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   */
  java.util.List<com.google.cloud.videointelligence.v1.SpeechContext> getSpeechContextsList();

  /**
   *
   *
   * <pre>
   * Optional. A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>
   * repeated .google.cloud.videointelligence.v1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   */
  com.google.cloud.videointelligence.v1.SpeechContext getSpeechContexts(int index);

  /**
   *
   *
   * <pre>
   * Optional. A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>
   * repeated .google.cloud.videointelligence.v1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   */
  int getSpeechContextsCount();

  /**
   *
   *
   * <pre>
   * Optional. A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>
   * repeated .google.cloud.videointelligence.v1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   */
  java.util.List<? extends com.google.cloud.videointelligence.v1.SpeechContextOrBuilder> getSpeechContextsOrBuilderList();

  /**
   *
   *
   * <pre>
   * Optional. A means to provide context to assist the speech recognition.
   * </pre>
   *
   * <code>
   * repeated .google.cloud.videointelligence.v1.SpeechContext speech_contexts = 4 [(.google.api.field_behavior) = OPTIONAL];
   * </code>
   */
  com.google.cloud.videointelligence.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder(int index);

  /**
   *
   *
   * <pre>
   * Optional. If 'true', adds punctuation to recognition result hypotheses.
   * This feature is only available in select languages. Setting this for
   * requests in other languages has no effect at all. The default 'false' value
   * does not add punctuation to result hypotheses. NOTE: "This is currently
   * offered as an experimental service, complimentary to all users. In the
   * future this may be exclusively available as a premium feature."
   * </pre>
   *
   * <code>bool enable_automatic_punctuation = 5 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The enableAutomaticPunctuation.
   */
  boolean getEnableAutomaticPunctuation();

  /**
   *
   *
   * <pre>
   * Optional. For file formats, such as MXF or MKV, supporting multiple audio
   * tracks, specify up to two tracks. Default: track 0.
   * </pre>
   *
   * <code>repeated int32 audio_tracks = 6 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return A list containing the audioTracks.
   */
  java.util.List<java.lang.Integer> getAudioTracksList();

  /**
   *
   *
   * <pre>
   * Optional. For file formats, such as MXF or MKV, supporting multiple audio
   * tracks, specify up to two tracks. Default: track 0.
   * </pre>
   *
   * <code>repeated int32 audio_tracks = 6 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The count of audioTracks.
   */
  int getAudioTracksCount();

  /**
   *
   *
   * <pre>
   * Optional. For file formats, such as MXF or MKV, supporting multiple audio
   * tracks, specify up to two tracks. Default: track 0.
   * </pre>
   *
   * <code>repeated int32 audio_tracks = 6 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @param index The index of the element to return.
   * @return The audioTracks at the given index.
   */
  int getAudioTracks(int index);

  /**
   *
   *
   * <pre>
   * Optional. If 'true', enables speaker detection for each recognized word in
   * the top alternative of the recognition result using a speaker_tag provided
   * in the WordInfo.
   * Note: When this is true, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * </pre>
   *
   * <code>bool enable_speaker_diarization = 7 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The enableSpeakerDiarization.
   */
  boolean getEnableSpeakerDiarization();

  /**
   *
   *
   * <pre>
   * Optional. If set, specifies the estimated number of speakers in the
   * conversation. If not set, defaults to '2'. Ignored unless
   * enable_speaker_diarization is set to true.
   * </pre>
   *
   * <code>int32 diarization_speaker_count = 8 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The diarizationSpeakerCount.
   */
  int getDiarizationSpeakerCount();

  /**
   *
   *
   * <pre>
   * Optional. If `true`, the top result includes a list of words and the
   * confidence for those words. If `false`, no word-level confidence
   * information is returned. The default is `false`.
   * </pre>
   *
   * <code>bool enable_word_confidence = 9 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The enableWordConfidence.
   */
  boolean getEnableWordConfidence();
}
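The interface above is the read-only view that protoc generates for the `SpeechTranscriptionConfig` message: both the message class and its `Builder` implement it, so each getter here has a matching `set`/`add` method on the builder. The sketch below shows how such a config might be assembled and attached to an annotation request. It is a minimal, hypothetical example that assumes the standard generated API of the `google-cloud-video-intelligence` library (the builder setters, `SpeechContext.addPhrases`, `Feature.SPEECH_TRANSCRIPTION`, and `VideoContext.setSpeechTranscriptionConfig`); none of those symbols are defined in this file, and the input URI is a placeholder.

import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.Feature;
import com.google.cloud.videointelligence.v1.SpeechContext;
import com.google.cloud.videointelligence.v1.SpeechTranscriptionConfig;
import com.google.cloud.videointelligence.v1.SpeechTranscriptionConfigOrBuilder;
import com.google.cloud.videointelligence.v1.VideoContext;

public class SpeechTranscriptionConfigExample {
  public static void main(String[] args) {
    // Assemble the transcription config; language_code is the only required field.
    SpeechTranscriptionConfig config =
        SpeechTranscriptionConfig.newBuilder()
            .setLanguageCode("en-US")            // BCP-47 tag (required)
            .setMaxAlternatives(2)               // up to two hypotheses per transcription
            .setFilterProfanity(true)            // mask profanities with asterisks
            .addSpeechContexts(
                SpeechContext.newBuilder().addPhrases("Cloud Video Intelligence").build())
            .setEnableAutomaticPunctuation(true)
            .addAudioTracks(0)                   // transcribe audio track 0 (the default)
            .setEnableSpeakerDiarization(true)
            .setDiarizationSpeakerCount(2)
            .setEnableWordConfidence(true)
            .build();

    // Both the message and its builder implement SpeechTranscriptionConfigOrBuilder,
    // so the getters declared above work on either.
    SpeechTranscriptionConfigOrBuilder view = config;
    System.out.println(view.getLanguageCode() + " / speakers=" + view.getDiarizationSpeakerCount());

    // Attach the config to a request through VideoContext (assumed wiring; URI is a placeholder).
    AnnotateVideoRequest request =
        AnnotateVideoRequest.newBuilder()
            .setInputUri("gs://your-bucket/your-video.mp4")
            .addFeatures(Feature.SPEECH_TRANSCRIPTION)
            .setVideoContext(
                VideoContext.newBuilder().setSpeechTranscriptionConfig(config).build())
            .build();
    System.out.println(
        request.getVideoContext().getSpeechTranscriptionConfig().getMaxAlternatives());
  }
}

Only `language_code` is required; every other setter shown simply overrides the defaults documented in the Javadoc above (at most one alternative, no profanity filtering, audio track 0, diarization off).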



