/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v1/cloud_speech.proto

// Protobuf Java Version: 3.25.3
package com.google.cloud.speech.v1;

/**
 *
 *
 * 
 * Provides information to the recognizer that specifies how to process the
 * request.
 * 
 * Protobuf type {@code google.cloud.speech.v1.RecognitionConfig}
 */
public final class RecognitionConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.speech.v1.RecognitionConfig)
    RecognitionConfigOrBuilder {
  private static final long serialVersionUID = 0L;

  // Use RecognitionConfig.newBuilder() to construct.
  private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private RecognitionConfig() {
    encoding_ = 0;
    languageCode_ = "";
    alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.emptyList();
    speechContexts_ = java.util.Collections.emptyList();
    model_ = "";
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new RecognitionConfig();
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.speech.v1.SpeechProto
        .internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.speech.v1.SpeechProto
        .internal_static_google_cloud_speech_v1_RecognitionConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.speech.v1.RecognitionConfig.class,
            com.google.cloud.speech.v1.RecognitionConfig.Builder.class);
  }

  /**
   *
   *
   * The encoding of the audio data sent in the request.
   *
   * All encodings support only 1 channel (mono) audio, unless the
   * `audio_channel_count` and `enable_separate_recognition_per_channel` fields
   * are set.
   *
   * For best results, the audio source should be captured and transmitted using
   * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
   * recognition can be reduced if lossy codecs are used to capture or transmit
   * audio, particularly if background noise is present. Lossy codecs include
   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, `SPEEX_WITH_HEADER_BYTE`, `MP3`,
   * and `WEBM_OPUS`.
   *
   * The `FLAC` and `WAV` audio file formats include a header that describes the
   * included audio content. You can request recognition for `WAV` files that
   * contain either `LINEAR16` or `MULAW` encoded audio.
   * If you send `FLAC` or `WAV` audio file format in
   * your request, you do not need to specify an `AudioEncoding`; the audio
   * encoding format is determined from the file header. If you specify
   * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
   * encoding configuration must match the encoding described in the audio
   * header; otherwise the request returns an
   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
   * code.
   * 
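   * <p>A minimal illustrative sketch (not part of the generated file): picking a
   * lossless encoding through the builder documented on this page.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setEncoding(RecognitionConfig.AudioEncoding.FLAC) // lossless, recommended
   *         .build();
   * }</pre>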
   * Protobuf enum {@code google.cloud.speech.v1.RecognitionConfig.AudioEncoding}
   */
  public enum AudioEncoding implements com.google.protobuf.ProtocolMessageEnum {
    /**
     *
     *
     * Not specified.
     * 
* * ENCODING_UNSPECIFIED = 0; */ ENCODING_UNSPECIFIED(0), /** * * *
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * 
* * LINEAR16 = 1; */ LINEAR16(1), /** * * *
     * `FLAC` (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples; however, not all fields in
     * `STREAMINFO` are supported.
     * 
* * FLAC = 2; */ FLAC(2), /** * * *
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * 
* * MULAW = 3; */ MULAW(3), /** * * *
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * 
* * AMR = 4; */ AMR(4), /** * * *
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * 
* * AMR_WB = 5; */ AMR_WB(5), /** * * *
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
     * 
* * OGG_OPUS = 6; */ OGG_OPUS(6), /** * * *
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/) encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * 
* * SPEEX_WITH_HEADER_BYTE = 7; */ SPEEX_WITH_HEADER_BYTE(7), /** * * *
     * MP3 audio. MP3 encoding is a Beta feature and only available in
     * v1p1beta1. Supports all standard MP3 bitrates (which range from 32 to 320
     * kbps). When using this encoding, `sample_rate_hertz` must match the
     * sample rate of the file being used.
     * 
* * MP3 = 8; */ MP3(8), /** * * *
     * Opus encoded audio frames in WebM container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
     * one of 8000, 12000, 16000, 24000, or 48000.
     * 
* * WEBM_OPUS = 9; */ WEBM_OPUS(9), UNRECOGNIZED(-1), ; /** * * *
     * Not specified.
     * 
* * ENCODING_UNSPECIFIED = 0; */ public static final int ENCODING_UNSPECIFIED_VALUE = 0; /** * * *
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * 
* * LINEAR16 = 1; */ public static final int LINEAR16_VALUE = 1; /** * * *
     * `FLAC` (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples; however, not all fields in
     * `STREAMINFO` are supported.
     * 
* * FLAC = 2; */ public static final int FLAC_VALUE = 2; /** * * *
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * 
* * MULAW = 3; */ public static final int MULAW_VALUE = 3; /** * * *
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * 
* * AMR = 4; */ public static final int AMR_VALUE = 4; /** * * *
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * 
* * AMR_WB = 5; */ public static final int AMR_WB_VALUE = 5; /** * * *
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
     * 
* * OGG_OPUS = 6; */ public static final int OGG_OPUS_VALUE = 6; /** * * *
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/) encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * 
* * SPEEX_WITH_HEADER_BYTE = 7; */ public static final int SPEEX_WITH_HEADER_BYTE_VALUE = 7; /** * * *
     * MP3 audio. MP3 encoding is a Beta feature and only available in
     * v1p1beta1. Supports all standard MP3 bitrates (which range from 32 to 320
     * kbps). When using this encoding, `sample_rate_hertz` must match the
     * sample rate of the file being used.
     * 
* * MP3 = 8; */ public static final int MP3_VALUE = 8; /** * * *
     * Opus encoded audio frames in WebM container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must be
     * one of 8000, 12000, 16000, 24000, or 48000.
     * 
* * WEBM_OPUS = 9; */ public static final int WEBM_OPUS_VALUE = 9; public final int getNumber() { if (this == UNRECOGNIZED) { throw new java.lang.IllegalArgumentException( "Can't get the number of an unknown enum value."); } return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static AudioEncoding valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static AudioEncoding forNumber(int value) { switch (value) { case 0: return ENCODING_UNSPECIFIED; case 1: return LINEAR16; case 2: return FLAC; case 3: return MULAW; case 4: return AMR; case 5: return AMR_WB; case 6: return OGG_OPUS; case 7: return SPEEX_WITH_HEADER_BYTE; case 8: return MP3; case 9: return WEBM_OPUS; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { return internalValueMap; } private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public AudioEncoding findValueByNumber(int number) { return AudioEncoding.forNumber(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { if (this == UNRECOGNIZED) { throw new java.lang.IllegalStateException( "Can't get the descriptor of an unrecognized enum value."); } return getDescriptor().getValues().get(ordinal()); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return com.google.cloud.speech.v1.RecognitionConfig.getDescriptor().getEnumTypes().get(0); } private static final AudioEncoding[] VALUES = values(); public static AudioEncoding valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); } if (desc.getIndex() == -1) { return UNRECOGNIZED; } return VALUES[desc.getIndex()]; } private final int value; private AudioEncoding(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:google.cloud.speech.v1.RecognitionConfig.AudioEncoding) } private int bitField0_; public static final int ENCODING_FIELD_NUMBER = 1; private int encoding_ = 0; /** * * *
   * Encoding of audio data sent in all `RecognitionAudio` messages.
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see
   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
   * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; * * @return The enum numeric value on the wire for encoding. */ @java.lang.Override public int getEncodingValue() { return encoding_; } /** * * *
   * Encoding of audio data sent in all `RecognitionAudio` messages.
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see
   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
   * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; * * @return The encoding. */ @java.lang.Override public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() { com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding result = com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.forNumber(encoding_); return result == null ? com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.UNRECOGNIZED : result; } public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2; private int sampleRateHertz_ = 0; /** * * *
   * Sample rate in Hertz of the audio data sent in all
   * `RecognitionAudio` messages. Valid values are: 8000-48000.
   * 16000 is optimal. For best results, set the sampling rate of the audio
   * source to 16000 Hz. If that's not possible, use the native sample rate of
   * the audio source (instead of re-sampling).
   * This field is optional for FLAC and WAV audio files, but is
   * required for all other audio formats. For details, see
   * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
   * 
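   * <p>A minimal sketch (builder setters as generated for this message): per the
   * guidance above, prefer the source's native rate over re-sampling.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
   *         .setSampleRateHertz(44100) // native rate of the source; 16000 is optimal
   *         .setLanguageCode("en-US")
   *         .build();
   * }</pre>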
* * int32 sample_rate_hertz = 2; * * @return The sampleRateHertz. */ @java.lang.Override public int getSampleRateHertz() { return sampleRateHertz_; } public static final int AUDIO_CHANNEL_COUNT_FIELD_NUMBER = 7; private int audioChannelCount_ = 0; /** * * *
   * The number of channels in the input audio data.
   * ONLY set this for MULTI-CHANNEL recognition.
   * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
   * The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
   * If `0` or omitted, defaults to one channel (mono).
   * Note: We only recognize the first channel by default.
   * To perform independent recognition on each channel set
   * `enable_separate_recognition_per_channel` to 'true'.
   * 
* * int32 audio_channel_count = 7; * * @return The audioChannelCount. */ @java.lang.Override public int getAudioChannelCount() { return audioChannelCount_; } public static final int ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER = 12; private boolean enableSeparateRecognitionPerChannel_ = false; /** * * *
   * This must be set to `true` explicitly, with `audio_channel_count` > 1,
   * to get each channel recognized separately. The recognition result will
   * contain a `channel_tag` field to state which channel that result belongs
   * to. If this is not true, we will only recognize the first channel. The
   * request is billed cumulatively for all channels recognized:
   * `audio_channel_count` multiplied by the length of the audio.
   * 
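   * <p>Illustrative sketch: both fields must be set together to get per-channel
   * results (standard generated setters assumed).
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setAudioChannelCount(2)                      // stereo input
   *         .setEnableSeparateRecognitionPerChannel(true) // billed per channel
   *         .build();
   * }</pre>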
* * bool enable_separate_recognition_per_channel = 12; * * @return The enableSeparateRecognitionPerChannel. */ @java.lang.Override public boolean getEnableSeparateRecognitionPerChannel() { return enableSeparateRecognitionPerChannel_; } public static final int LANGUAGE_CODE_FIELD_NUMBER = 3; @SuppressWarnings("serial") private volatile java.lang.Object languageCode_ = ""; /** * * *
   * Required. The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes.
   * 
* * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; * * @return The languageCode. */ @java.lang.Override public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); languageCode_ = s; return s; } } /** * * *
   * Required. The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes.
   * 
* * string language_code = 3 [(.google.api.field_behavior) = REQUIRED]; * * @return The bytes for languageCode. */ @java.lang.Override public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); languageCode_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER = 18; @SuppressWarnings("serial") private com.google.protobuf.LazyStringArrayList alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.emptyList(); /** * * *
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
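   * <p>Illustrative sketch (uses the generated repeated-field adder): up to three
   * alternatives alongside the required main `language_code`.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setLanguageCode("en-US")             // required main language
   *         .addAlternativeLanguageCodes("es-US")
   *         .addAlternativeLanguageCodes("fr-FR")
   *         .build();
   * }</pre>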
* * repeated string alternative_language_codes = 18; * * @return A list containing the alternativeLanguageCodes. */ public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() { return alternativeLanguageCodes_; } /** * * *
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
* * repeated string alternative_language_codes = 18; * * @return The count of alternativeLanguageCodes. */ public int getAlternativeLanguageCodesCount() { return alternativeLanguageCodes_.size(); } /** * * *
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
* * repeated string alternative_language_codes = 18; * * @param index The index of the element to return. * @return The alternativeLanguageCodes at the given index. */ public java.lang.String getAlternativeLanguageCodes(int index) { return alternativeLanguageCodes_.get(index); } /** * * *
   * A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language
   * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
   * of the currently supported language codes. If alternative languages are
   * listed, the recognition result will contain recognition in the most likely
   * language detected, including the main language_code. The recognition result
   * will include the language tag of the language detected in the audio. Note:
   * This feature is only supported for Voice Command and Voice Search use cases
   * and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
* * repeated string alternative_language_codes = 18; * * @param index The index of the value to return. * @return The bytes of the alternativeLanguageCodes at the given index. */ public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) { return alternativeLanguageCodes_.getByteString(index); } public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 4; private int maxAlternatives_ = 0; /** * * *
   * Maximum number of recognition hypotheses to be returned.
   * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   * within each `SpeechRecognitionResult`.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, the server will return a maximum of one.
   * 
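   * <p>Minimal sketch (generated int32 setter):
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setMaxAlternatives(5) // the server may still return fewer
   *         .build();
   * }</pre>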
* * int32 max_alternatives = 4; * * @return The maxAlternatives. */ @java.lang.Override public int getMaxAlternatives() { return maxAlternatives_; } public static final int PROFANITY_FILTER_FIELD_NUMBER = 5; private boolean profanityFilter_ = false; /** * * *
   * If set to `true`, the server will attempt to filter out
   * profanities, replacing all but the initial character in each filtered word
   * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * 
* * bool profanity_filter = 5; * * @return The profanityFilter. */ @java.lang.Override public boolean getProfanityFilter() { return profanityFilter_; } public static final int ADAPTATION_FIELD_NUMBER = 20; private com.google.cloud.speech.v1.SpeechAdaptation adaptation_; /** * * *
   * Speech adaptation configuration improves the accuracy of speech
   * recognition. For more information, see the [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   * documentation.
   * When speech adaptation is set it supersedes the `speech_contexts` field.
   * 
* * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20; * * @return Whether the adaptation field is set. */ @java.lang.Override public boolean hasAdaptation() { return ((bitField0_ & 0x00000001) != 0); } /** * * *
   * Speech adaptation configuration improves the accuracy of speech
   * recognition. For more information, see the [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   * documentation.
   * When speech adaptation is set it supersedes the `speech_contexts` field.
   * 
* * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20; * * @return The adaptation. */ @java.lang.Override public com.google.cloud.speech.v1.SpeechAdaptation getAdaptation() { return adaptation_ == null ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance() : adaptation_; } /** * * *
   * Speech adaptation configuration improves the accuracy of speech
   * recognition. For more information, see the [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
   * documentation.
   * When speech adaptation is set it supersedes the `speech_contexts` field.
   * 
* * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20; */ @java.lang.Override public com.google.cloud.speech.v1.SpeechAdaptationOrBuilder getAdaptationOrBuilder() { return adaptation_ == null ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance() : adaptation_; } public static final int TRANSCRIPT_NORMALIZATION_FIELD_NUMBER = 24; private com.google.cloud.speech.v1.TranscriptNormalization transcriptNormalization_; /** * * *
   * Optional. Use transcription normalization to automatically replace parts of
   * the transcript with phrases of your choosing. For StreamingRecognize, this
   * normalization only applies to stable partial transcripts (stability > 0.8)
   * and final transcripts.
   * 
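   * <p>Illustrative sketch; assumes `TranscriptNormalization` carries
   * search/replace entries as defined in this same artifact.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setTranscriptNormalization(
   *             TranscriptNormalization.newBuilder()
   *                 .addEntries(
   *                     TranscriptNormalization.Entry.newBuilder()
   *                         .setSearch("cloud speech api")   // hypothetical entry
   *                         .setReplace("Cloud Speech API")))
   *         .build();
   * }</pre>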
* * * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL]; * * * @return Whether the transcriptNormalization field is set. */ @java.lang.Override public boolean hasTranscriptNormalization() { return ((bitField0_ & 0x00000002) != 0); } /** * * *
   * Optional. Use transcription normalization to automatically replace parts of
   * the transcript with phrases of your choosing. For StreamingRecognize, this
   * normalization only applies to stable partial transcripts (stability > 0.8)
   * and final transcripts.
   * 
* * * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL]; * * * @return The transcriptNormalization. */ @java.lang.Override public com.google.cloud.speech.v1.TranscriptNormalization getTranscriptNormalization() { return transcriptNormalization_ == null ? com.google.cloud.speech.v1.TranscriptNormalization.getDefaultInstance() : transcriptNormalization_; } /** * * *
   * Optional. Use transcription normalization to automatically replace parts of
   * the transcript with phrases of your choosing. For StreamingRecognize, this
   * normalization only applies to stable partial transcripts (stability > 0.8)
   * and final transcripts.
   * 
* * * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL]; * */ @java.lang.Override public com.google.cloud.speech.v1.TranscriptNormalizationOrBuilder getTranscriptNormalizationOrBuilder() { return transcriptNormalization_ == null ? com.google.cloud.speech.v1.TranscriptNormalization.getDefaultInstance() : transcriptNormalization_; } public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 6; @SuppressWarnings("serial") private java.util.List speechContexts_; /** * * *
   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * 
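   * <p>Illustrative sketch (uses `SpeechContext`'s generated `addPhrases` adder),
   * biasing recognition toward expected phrases.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .addSpeechContexts(
   *             SpeechContext.newBuilder()
   *                 .addPhrases("weather forecast") // hypothetical phrase hint
   *                 .addPhrases("Sunnyvale"))
   *         .build();
   * }</pre>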
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ @java.lang.Override public java.util.List getSpeechContextsList() { return speechContexts_; } /** * * *
   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ @java.lang.Override public java.util.List getSpeechContextsOrBuilderList() { return speechContexts_; } /** * * *
   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ @java.lang.Override public int getSpeechContextsCount() { return speechContexts_.size(); } /** * * *
   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ @java.lang.Override public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) { return speechContexts_.get(index); } /** * * *
   * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see
   * [speech
   * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ @java.lang.Override public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder(int index) { return speechContexts_.get(index); } public static final int ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER = 8; private boolean enableWordTimeOffsets_ = false; /** * * *
   * If `true`, the top result includes a list of words and
   * the start and end time offsets (timestamps) for those words. If
   * `false`, no word-level time offset information is returned. The default is
   * `false`.
   * 
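   * <p>Minimal sketch (generated boolean setters):
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setEnableWordTimeOffsets(true) // per-word start/end timestamps
   *         .setEnableWordConfidence(true)  // per-word confidence, field 15 below
   *         .build();
   * }</pre>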
* * bool enable_word_time_offsets = 8; * * @return The enableWordTimeOffsets. */ @java.lang.Override public boolean getEnableWordTimeOffsets() { return enableWordTimeOffsets_; } public static final int ENABLE_WORD_CONFIDENCE_FIELD_NUMBER = 15; private boolean enableWordConfidence_ = false; /** * * *
   * If `true`, the top result includes a list of words and the
   * confidence for those words. If `false`, no word-level confidence
   * information is returned. The default is `false`.
   * 
* * bool enable_word_confidence = 15; * * @return The enableWordConfidence. */ @java.lang.Override public boolean getEnableWordConfidence() { return enableWordConfidence_; } public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 11; private boolean enableAutomaticPunctuation_ = false; /** * * *
   * If 'true', adds punctuation to recognition result hypotheses.
   * This feature is only available in select languages. Setting this for
   * requests in other languages has no effect at all.
   * The default 'false' value does not add punctuation to result hypotheses.
   * 
* * bool enable_automatic_punctuation = 11; * * @return The enableAutomaticPunctuation. */ @java.lang.Override public boolean getEnableAutomaticPunctuation() { return enableAutomaticPunctuation_; } public static final int ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER = 22; private com.google.protobuf.BoolValue enableSpokenPunctuation_; /** * * *
   * The spoken punctuation behavior for the call. If not set, uses default
   * behavior based on the model of choice; e.g., command_and_search will
   * enable spoken punctuation by default.
   * If 'true', replaces spoken punctuation with the corresponding symbols in
   * the request. For example, "how are you question mark" becomes "how are
   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
   * for support. If 'false', spoken punctuation is not replaced.
   * 
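   * <p>Illustrative sketch: because this is a `google.protobuf.BoolValue` wrapper
   * rather than a plain `bool`, it is tri-state (unset, true, false) and is set
   * via `BoolValue`.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setEnableSpokenPunctuation(com.google.protobuf.BoolValue.of(true))
   *         .build();
   * }</pre>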
* * .google.protobuf.BoolValue enable_spoken_punctuation = 22; * * @return Whether the enableSpokenPunctuation field is set. */ @java.lang.Override public boolean hasEnableSpokenPunctuation() { return ((bitField0_ & 0x00000004) != 0); } /** * * *
   * The spoken punctuation behavior for the call. If not set, uses default
   * behavior based on the model of choice; e.g., command_and_search will
   * enable spoken punctuation by default.
   * If 'true', replaces spoken punctuation with the corresponding symbols in
   * the request. For example, "how are you question mark" becomes "how are
   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
   * for support. If 'false', spoken punctuation is not replaced.
   * 
* * .google.protobuf.BoolValue enable_spoken_punctuation = 22; * * @return The enableSpokenPunctuation. */ @java.lang.Override public com.google.protobuf.BoolValue getEnableSpokenPunctuation() { return enableSpokenPunctuation_ == null ? com.google.protobuf.BoolValue.getDefaultInstance() : enableSpokenPunctuation_; } /** * * *
   * The spoken punctuation behavior for the call. If not set, uses default
   * behavior based on the model of choice; e.g., command_and_search will
   * enable spoken punctuation by default.
   * If 'true', replaces spoken punctuation with the corresponding symbols in
   * the request. For example, "how are you question mark" becomes "how are
   * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
   * for support. If 'false', spoken punctuation is not replaced.
   * 
* * .google.protobuf.BoolValue enable_spoken_punctuation = 22; */ @java.lang.Override public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() { return enableSpokenPunctuation_ == null ? com.google.protobuf.BoolValue.getDefaultInstance() : enableSpokenPunctuation_; } public static final int ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER = 23; private com.google.protobuf.BoolValue enableSpokenEmojis_; /** * * *
   * The spoken emoji behavior for the call. If not set, uses default behavior
   * based on the model of choice.
   * If 'true', adds spoken emoji formatting for the request. This will replace
   * spoken emojis with the corresponding Unicode symbols in the final
   * transcript. If 'false', spoken emojis are not replaced.
   * 
* * .google.protobuf.BoolValue enable_spoken_emojis = 23; * * @return Whether the enableSpokenEmojis field is set. */ @java.lang.Override public boolean hasEnableSpokenEmojis() { return ((bitField0_ & 0x00000008) != 0); } /** * * *
   * The spoken emoji behavior for the call. If not set, uses default behavior
   * based on the model of choice.
   * If 'true', adds spoken emoji formatting for the request. This will replace
   * spoken emojis with the corresponding Unicode symbols in the final
   * transcript. If 'false', spoken emojis are not replaced.
   * 
* * .google.protobuf.BoolValue enable_spoken_emojis = 23; * * @return The enableSpokenEmojis. */ @java.lang.Override public com.google.protobuf.BoolValue getEnableSpokenEmojis() { return enableSpokenEmojis_ == null ? com.google.protobuf.BoolValue.getDefaultInstance() : enableSpokenEmojis_; } /** * * *
   * The spoken emoji behavior for the call. If not set, uses default behavior
   * based on the model of choice.
   * If 'true', adds spoken emoji formatting for the request. This will replace
   * spoken emojis with the corresponding Unicode symbols in the final
   * transcript. If 'false', spoken emojis are not replaced.
   * 
* * .google.protobuf.BoolValue enable_spoken_emojis = 23; */ @java.lang.Override public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() { return enableSpokenEmojis_ == null ? com.google.protobuf.BoolValue.getDefaultInstance() : enableSpokenEmojis_; } public static final int DIARIZATION_CONFIG_FIELD_NUMBER = 19; private com.google.cloud.speech.v1.SpeakerDiarizationConfig diarizationConfig_; /** * * *
   * Config to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * Note: When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * 
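   * <p>Illustrative sketch; assumes the enable/min/max fields on
   * `SpeakerDiarizationConfig` from this same artifact.
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setDiarizationConfig(
   *             SpeakerDiarizationConfig.newBuilder()
   *                 .setEnableSpeakerDiarization(true)
   *                 .setMinSpeakerCount(2)  // hypothetical bounds for a meeting
   *                 .setMaxSpeakerCount(6))
   *         .build();
   * }</pre>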
* * .google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19; * * @return Whether the diarizationConfig field is set. */ @java.lang.Override public boolean hasDiarizationConfig() { return ((bitField0_ & 0x00000010) != 0); } /** * * *
   * Config to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * Note: When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * 
* * .google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19; * * @return The diarizationConfig. */ @java.lang.Override public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig() { return diarizationConfig_ == null ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance() : diarizationConfig_; } /** * * *
   * Config to enable speaker diarization and set additional
   * parameters to make diarization better suited for your application.
   * Note: When this is enabled, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * 
* * .google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19; */ @java.lang.Override public com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder getDiarizationConfigOrBuilder() { return diarizationConfig_ == null ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance() : diarizationConfig_; } public static final int METADATA_FIELD_NUMBER = 9; private com.google.cloud.speech.v1.RecognitionMetadata metadata_; /** * * *
   * Metadata regarding this request.
   * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; * * @return Whether the metadata field is set. */ @java.lang.Override public boolean hasMetadata() { return ((bitField0_ & 0x00000020) != 0); } /** * * *
   * Metadata regarding this request.
   * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; * * @return The metadata. */ @java.lang.Override public com.google.cloud.speech.v1.RecognitionMetadata getMetadata() { return metadata_ == null ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance() : metadata_; } /** * * *
   * Metadata regarding this request.
   * 
* * .google.cloud.speech.v1.RecognitionMetadata metadata = 9; */ @java.lang.Override public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuilder() { return metadata_ == null ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance() : metadata_; } public static final int MODEL_FIELD_NUMBER = 13; @SuppressWarnings("serial") private volatile java.lang.Object model_ = ""; /** * * *
   * Which model to select for the given request. Select the model
   * best suited to your domain to get best results. If a model is not
   * explicitly specified, then we auto-select a model based on the parameters
   * in the RecognitionConfig.
   * <table>
   *   <tr>
   *     <td><b>Model</b></td>
   *     <td><b>Description</b></td>
   *   </tr>
   *   <tr>
   *     <td><code>latest_long</code></td>
   *     <td>Best for long form content like media or conversation.</td>
   *   </tr>
   *   <tr>
   *     <td><code>latest_short</code></td>
   *     <td>Best for short form content like commands or single shot directed
   *     speech.</td>
   *   </tr>
   *   <tr>
   *     <td><code>command_and_search</code></td>
   *     <td>Best for short queries such as voice commands or voice search.</td>
   *   </tr>
   *   <tr>
   *     <td><code>phone_call</code></td>
   *     <td>Best for audio that originated from a phone call (typically
   *     recorded at an 8 kHz sampling rate).</td>
   *   </tr>
   *   <tr>
   *     <td><code>video</code></td>
   *     <td>Best for audio that originated from video or includes multiple
   *         speakers. Ideally the audio is recorded at a 16 kHz or greater
   *         sampling rate. This is a premium model that costs more than the
   *         standard rate.</td>
   *   </tr>
   *   <tr>
   *     <td><code>default</code></td>
   *     <td>Best for audio that is not one of the specific audio models.
   *         For example, long-form audio. Ideally the audio is high-fidelity,
   *         recorded at a 16 kHz or greater sampling rate.</td>
   *   </tr>
   *   <tr>
   *     <td><code>medical_conversation</code></td>
   *     <td>Best for audio that originated from a conversation between a
   *         medical provider and patient.</td>
   *   </tr>
   *   <tr>
   *     <td><code>medical_dictation</code></td>
   *     <td>Best for audio that originated from dictation notes by a medical
   *         provider.</td>
   *   </tr>
   * </table>
   * 
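   * <p>Minimal sketch: selecting a model from the table above, optionally paired
   * with the enhanced variant (see `use_enhanced`, field 14 below).
   * <pre>{@code
   * RecognitionConfig config =
   *     RecognitionConfig.newBuilder()
   *         .setModel("phone_call")
   *         .setUseEnhanced(true) // falls back to the standard model if none exists
   *         .build();
   * }</pre>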
* * string model = 13; * * @return The model. */ @java.lang.Override public java.lang.String getModel() { java.lang.Object ref = model_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); model_ = s; return s; } } /** * * *
   * Which model to select for the given request. Select the model
   * best suited to your domain to get best results. If a model is not
   * explicitly specified, then we auto-select a model based on the parameters
   * in the RecognitionConfig.
   * <table>
   *   <tr>
   *     <td><b>Model</b></td>
   *     <td><b>Description</b></td>
   *   </tr>
   *   <tr>
   *     <td><code>latest_long</code></td>
   *     <td>Best for long form content like media or conversation.</td>
   *   </tr>
   *   <tr>
   *     <td><code>latest_short</code></td>
   *     <td>Best for short form content like commands or single shot directed
   *     speech.</td>
   *   </tr>
   *   <tr>
   *     <td><code>command_and_search</code></td>
   *     <td>Best for short queries such as voice commands or voice search.</td>
   *   </tr>
   *   <tr>
   *     <td><code>phone_call</code></td>
   *     <td>Best for audio that originated from a phone call (typically
   *     recorded at an 8 kHz sampling rate).</td>
   *   </tr>
   *   <tr>
   *     <td><code>video</code></td>
   *     <td>Best for audio that originated from video or includes multiple
   *         speakers. Ideally the audio is recorded at a 16 kHz or greater
   *         sampling rate. This is a premium model that costs more than the
   *         standard rate.</td>
   *   </tr>
   *   <tr>
   *     <td><code>default</code></td>
   *     <td>Best for audio that is not one of the specific audio models.
   *         For example, long-form audio. Ideally the audio is high-fidelity,
   *         recorded at a 16 kHz or greater sampling rate.</td>
   *   </tr>
   *   <tr>
   *     <td><code>medical_conversation</code></td>
   *     <td>Best for audio that originated from a conversation between a
   *         medical provider and patient.</td>
   *   </tr>
   *   <tr>
   *     <td><code>medical_dictation</code></td>
   *     <td>Best for audio that originated from dictation notes by a medical
   *         provider.</td>
   *   </tr>
   * </table>
   * 
* * string model = 13; * * @return The bytes for model. */ @java.lang.Override public com.google.protobuf.ByteString getModelBytes() { java.lang.Object ref = model_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); model_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int USE_ENHANCED_FIELD_NUMBER = 14; private boolean useEnhanced_ = false; /** * * *
   * Set to true to use an enhanced model for speech recognition.
   * If `use_enhanced` is set to true and the `model` field is not set, then
   * an appropriate enhanced model is chosen if an enhanced model exists for
   * the audio.
   *
   * If `use_enhanced` is true and an enhanced version of the specified model
   * does not exist, then the speech is recognized using the standard version
   * of the specified model.
   * 
* * bool use_enhanced = 14; * * @return The useEnhanced. */ @java.lang.Override public boolean getUseEnhanced() { return useEnhanced_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (encoding_ != com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED .getNumber()) { output.writeEnum(1, encoding_); } if (sampleRateHertz_ != 0) { output.writeInt32(2, sampleRateHertz_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_); } if (maxAlternatives_ != 0) { output.writeInt32(4, maxAlternatives_); } if (profanityFilter_ != false) { output.writeBool(5, profanityFilter_); } for (int i = 0; i < speechContexts_.size(); i++) { output.writeMessage(6, speechContexts_.get(i)); } if (audioChannelCount_ != 0) { output.writeInt32(7, audioChannelCount_); } if (enableWordTimeOffsets_ != false) { output.writeBool(8, enableWordTimeOffsets_); } if (((bitField0_ & 0x00000020) != 0)) { output.writeMessage(9, getMetadata()); } if (enableAutomaticPunctuation_ != false) { output.writeBool(11, enableAutomaticPunctuation_); } if (enableSeparateRecognitionPerChannel_ != false) { output.writeBool(12, enableSeparateRecognitionPerChannel_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 13, model_); } if (useEnhanced_ != false) { output.writeBool(14, useEnhanced_); } if (enableWordConfidence_ != false) { output.writeBool(15, enableWordConfidence_); } for (int i = 0; i < alternativeLanguageCodes_.size(); i++) { com.google.protobuf.GeneratedMessageV3.writeString( output, 18, alternativeLanguageCodes_.getRaw(i)); } if (((bitField0_ & 0x00000010) != 0)) { output.writeMessage(19, getDiarizationConfig()); } if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(20, getAdaptation()); } if (((bitField0_ & 0x00000004) != 0)) { output.writeMessage(22, getEnableSpokenPunctuation()); } if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(23, getEnableSpokenEmojis()); } if (((bitField0_ & 0x00000002) != 0)) { output.writeMessage(24, getTranscriptNormalization()); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (encoding_ != com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encoding_); } if (sampleRateHertz_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_); } if (maxAlternatives_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, maxAlternatives_); } if (profanityFilter_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, profanityFilter_); } for (int i = 0; i < speechContexts_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, speechContexts_.get(i)); } if (audioChannelCount_ != 0) { size += 
com.google.protobuf.CodedOutputStream.computeInt32Size(7, audioChannelCount_); } if (enableWordTimeOffsets_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, enableWordTimeOffsets_); } if (((bitField0_ & 0x00000020) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getMetadata()); } if (enableAutomaticPunctuation_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, enableAutomaticPunctuation_); } if (enableSeparateRecognitionPerChannel_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize( 12, enableSeparateRecognitionPerChannel_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, model_); } if (useEnhanced_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(14, useEnhanced_); } if (enableWordConfidence_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, enableWordConfidence_); } { int dataSize = 0; for (int i = 0; i < alternativeLanguageCodes_.size(); i++) { dataSize += computeStringSizeNoTag(alternativeLanguageCodes_.getRaw(i)); } size += dataSize; size += 2 * getAlternativeLanguageCodesList().size(); } if (((bitField0_ & 0x00000010) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getDiarizationConfig()); } if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(20, getAdaptation()); } if (((bitField0_ & 0x00000004) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( 22, getEnableSpokenPunctuation()); } if (((bitField0_ & 0x00000008) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(23, getEnableSpokenEmojis()); } if (((bitField0_ & 0x00000002) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( 24, getTranscriptNormalization()); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.speech.v1.RecognitionConfig)) { return super.equals(obj); } com.google.cloud.speech.v1.RecognitionConfig other = (com.google.cloud.speech.v1.RecognitionConfig) obj; if (encoding_ != other.encoding_) return false; if (getSampleRateHertz() != other.getSampleRateHertz()) return false; if (getAudioChannelCount() != other.getAudioChannelCount()) return false; if (getEnableSeparateRecognitionPerChannel() != other.getEnableSeparateRecognitionPerChannel()) return false; if (!getLanguageCode().equals(other.getLanguageCode())) return false; if (!getAlternativeLanguageCodesList().equals(other.getAlternativeLanguageCodesList())) return false; if (getMaxAlternatives() != other.getMaxAlternatives()) return false; if (getProfanityFilter() != other.getProfanityFilter()) return false; if (hasAdaptation() != other.hasAdaptation()) return false; if (hasAdaptation()) { if (!getAdaptation().equals(other.getAdaptation())) return false; } if (hasTranscriptNormalization() != other.hasTranscriptNormalization()) return false; if (hasTranscriptNormalization()) { if (!getTranscriptNormalization().equals(other.getTranscriptNormalization())) return false; } if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false; if (getEnableWordTimeOffsets() != other.getEnableWordTimeOffsets()) return false; if (getEnableWordConfidence() != other.getEnableWordConfidence()) 
      return false;
    if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
    if (hasEnableSpokenPunctuation() != other.hasEnableSpokenPunctuation()) return false;
    if (hasEnableSpokenPunctuation()) {
      if (!getEnableSpokenPunctuation().equals(other.getEnableSpokenPunctuation())) return false;
    }
    if (hasEnableSpokenEmojis() != other.hasEnableSpokenEmojis()) return false;
    if (hasEnableSpokenEmojis()) {
      if (!getEnableSpokenEmojis().equals(other.getEnableSpokenEmojis())) return false;
    }
    if (hasDiarizationConfig() != other.hasDiarizationConfig()) return false;
    if (hasDiarizationConfig()) {
      if (!getDiarizationConfig().equals(other.getDiarizationConfig())) return false;
    }
    if (hasMetadata() != other.hasMetadata()) return false;
    if (hasMetadata()) {
      if (!getMetadata().equals(other.getMetadata())) return false;
    }
    if (!getModel().equals(other.getModel())) return false;
    if (getUseEnhanced() != other.getUseEnhanced()) return false;
    if (!getUnknownFields().equals(other.getUnknownFields())) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + ENCODING_FIELD_NUMBER;
    hash = (53 * hash) + encoding_;
    hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER;
    hash = (53 * hash) + getSampleRateHertz();
    hash = (37 * hash) + AUDIO_CHANNEL_COUNT_FIELD_NUMBER;
    hash = (53 * hash) + getAudioChannelCount();
    hash = (37 * hash) + ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER;
    hash =
        (53 * hash)
            + com.google.protobuf.Internal.hashBoolean(getEnableSeparateRecognitionPerChannel());
    hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER;
    hash = (53 * hash) + getLanguageCode().hashCode();
    if (getAlternativeLanguageCodesCount() > 0) {
      hash = (37 * hash) + ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER;
      hash = (53 * hash) + getAlternativeLanguageCodesList().hashCode();
    }
    hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER;
    hash = (53 * hash) + getMaxAlternatives();
    hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter());
    if (hasAdaptation()) {
      hash = (37 * hash) + ADAPTATION_FIELD_NUMBER;
      hash = (53 * hash) + getAdaptation().hashCode();
    }
    if (hasTranscriptNormalization()) {
      hash = (37 * hash) + TRANSCRIPT_NORMALIZATION_FIELD_NUMBER;
      hash = (53 * hash) + getTranscriptNormalization().hashCode();
    }
    if (getSpeechContextsCount() > 0) {
      hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
      hash = (53 * hash) + getSpeechContextsList().hashCode();
    }
    hash = (37 * hash) + ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordTimeOffsets());
    hash = (37 * hash) + ENABLE_WORD_CONFIDENCE_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordConfidence());
    hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation());
    if (hasEnableSpokenPunctuation()) {
      hash = (37 * hash) + ENABLE_SPOKEN_PUNCTUATION_FIELD_NUMBER;
      hash = (53 * hash) + getEnableSpokenPunctuation().hashCode();
    }
    if (hasEnableSpokenEmojis()) {
      hash = (37 * hash) + ENABLE_SPOKEN_EMOJIS_FIELD_NUMBER;
      hash = (53 * hash) + getEnableSpokenEmojis().hashCode();
    }
    if (hasDiarizationConfig()) {
      hash = (37 * hash) + DIARIZATION_CONFIG_FIELD_NUMBER;
      hash = (53 * hash) + getDiarizationConfig().hashCode();
    }
    if (hasMetadata()) {
      hash = (37 * hash) + METADATA_FIELD_NUMBER;
      hash = (53 * hash) + getMetadata().hashCode();
    }
    hash = (37 * hash) + MODEL_FIELD_NUMBER;
    hash = (53 * hash) + getModel().hashCode();
    hash = (37 * hash) + USE_ENHANCED_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseEnhanced());
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(java.nio.ByteBuffer data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(com.google.cloud.speech.v1.RecognitionConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   *
   * Provides information to the recognizer that specifies how to process the
   * request.
   * 
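   *
   * Illustrative usage sketch (not part of the generated file): building a
   * config for 16 kHz mono LINEAR16 audio in US English. The field values are
   * assumptions chosen for demonstration only.
   *
   *   com.google.cloud.speech.v1.RecognitionConfig config =
   *       com.google.cloud.speech.v1.RecognitionConfig.newBuilder()
   *           .setEncoding(com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.LINEAR16)
   *           .setSampleRateHertz(16000)
   *           .setLanguageCode("en-US")
   *           .build();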
   *
   * Protobuf type {@code google.cloud.speech.v1.RecognitionConfig}
   */
  public static final class Builder
      extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.speech.v1.RecognitionConfig)
      com.google.cloud.speech.v1.RecognitionConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.speech.v1.SpeechProto
          .internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.speech.v1.SpeechProto
          .internal_static_google_cloud_speech_v1_RecognitionConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.speech.v1.RecognitionConfig.class,
              com.google.cloud.speech.v1.RecognitionConfig.Builder.class);
    }

    // Construct using com.google.cloud.speech.v1.RecognitionConfig.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }

    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
        getAdaptationFieldBuilder();
        getTranscriptNormalizationFieldBuilder();
        getSpeechContextsFieldBuilder();
        getEnableSpokenPunctuationFieldBuilder();
        getEnableSpokenEmojisFieldBuilder();
        getDiarizationConfigFieldBuilder();
        getMetadataFieldBuilder();
      }
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      bitField0_ = 0;
      encoding_ = 0;
      sampleRateHertz_ = 0;
      audioChannelCount_ = 0;
      enableSeparateRecognitionPerChannel_ = false;
      languageCode_ = "";
      alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.emptyList();
      maxAlternatives_ = 0;
      profanityFilter_ = false;
      adaptation_ = null;
      if (adaptationBuilder_ != null) {
        adaptationBuilder_.dispose();
        adaptationBuilder_ = null;
      }
      transcriptNormalization_ = null;
      if (transcriptNormalizationBuilder_ != null) {
        transcriptNormalizationBuilder_.dispose();
        transcriptNormalizationBuilder_ = null;
      }
      if (speechContextsBuilder_ == null) {
        speechContexts_ = java.util.Collections.emptyList();
      } else {
        speechContexts_ = null;
        speechContextsBuilder_.clear();
      }
      bitField0_ = (bitField0_ & ~0x00000400);
      enableWordTimeOffsets_ = false;
      enableWordConfidence_ = false;
      enableAutomaticPunctuation_ = false;
      enableSpokenPunctuation_ = null;
      if (enableSpokenPunctuationBuilder_ != null) {
        enableSpokenPunctuationBuilder_.dispose();
        enableSpokenPunctuationBuilder_ = null;
      }
      enableSpokenEmojis_ = null;
      if (enableSpokenEmojisBuilder_ != null) {
        enableSpokenEmojisBuilder_.dispose();
        enableSpokenEmojisBuilder_ = null;
      }
      diarizationConfig_ = null;
      if (diarizationConfigBuilder_ != null) {
        diarizationConfigBuilder_.dispose();
        diarizationConfigBuilder_ = null;
      }
      metadata_ = null;
      if (metadataBuilder_ != null) {
        metadataBuilder_.dispose();
        metadataBuilder_ = null;
      }
      model_ = "";
      useEnhanced_ = false;
      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.speech.v1.SpeechProto
          .internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() {
      return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.speech.v1.RecognitionConfig build() {
      com.google.cloud.speech.v1.RecognitionConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.speech.v1.RecognitionConfig buildPartial() {
      com.google.cloud.speech.v1.RecognitionConfig result =
          new com.google.cloud.speech.v1.RecognitionConfig(this);
      buildPartialRepeatedFields(result);
      if (bitField0_ != 0) {
        buildPartial0(result);
      }
      onBuilt();
      return result;
    }

    private void buildPartialRepeatedFields(com.google.cloud.speech.v1.RecognitionConfig result) {
      if (speechContextsBuilder_ == null) {
        if (((bitField0_ & 0x00000400) != 0)) {
          speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
          bitField0_ = (bitField0_ & ~0x00000400);
        }
        result.speechContexts_ = speechContexts_;
      } else {
        result.speechContexts_ = speechContextsBuilder_.build();
      }
    }

    private void buildPartial0(com.google.cloud.speech.v1.RecognitionConfig result) {
      int from_bitField0_ = bitField0_;
      if (((from_bitField0_ & 0x00000001) != 0)) {
        result.encoding_ = encoding_;
      }
      if (((from_bitField0_ & 0x00000002) != 0)) {
        result.sampleRateHertz_ = sampleRateHertz_;
      }
      if (((from_bitField0_ & 0x00000004) != 0)) {
        result.audioChannelCount_ = audioChannelCount_;
      }
      if (((from_bitField0_ & 0x00000008) != 0)) {
        result.enableSeparateRecognitionPerChannel_ = enableSeparateRecognitionPerChannel_;
      }
      if (((from_bitField0_ & 0x00000010) != 0)) {
        result.languageCode_ = languageCode_;
      }
      if (((from_bitField0_ & 0x00000020) != 0)) {
        alternativeLanguageCodes_.makeImmutable();
        result.alternativeLanguageCodes_ = alternativeLanguageCodes_;
      }
      if (((from_bitField0_ & 0x00000040) != 0)) {
        result.maxAlternatives_ = maxAlternatives_;
      }
      if (((from_bitField0_ & 0x00000080) != 0)) {
        result.profanityFilter_ = profanityFilter_;
      }
      int to_bitField0_ = 0;
      if (((from_bitField0_ & 0x00000100) != 0)) {
        result.adaptation_ = adaptationBuilder_ == null ? adaptation_ : adaptationBuilder_.build();
        to_bitField0_ |= 0x00000001;
      }
      if (((from_bitField0_ & 0x00000200) != 0)) {
        result.transcriptNormalization_ =
            transcriptNormalizationBuilder_ == null
                ? transcriptNormalization_
                : transcriptNormalizationBuilder_.build();
        to_bitField0_ |= 0x00000002;
      }
      if (((from_bitField0_ & 0x00000800) != 0)) {
        result.enableWordTimeOffsets_ = enableWordTimeOffsets_;
      }
      if (((from_bitField0_ & 0x00001000) != 0)) {
        result.enableWordConfidence_ = enableWordConfidence_;
      }
      if (((from_bitField0_ & 0x00002000) != 0)) {
        result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
      }
      if (((from_bitField0_ & 0x00004000) != 0)) {
        result.enableSpokenPunctuation_ =
            enableSpokenPunctuationBuilder_ == null
                ? enableSpokenPunctuation_
                : enableSpokenPunctuationBuilder_.build();
        to_bitField0_ |= 0x00000004;
      }
      if (((from_bitField0_ & 0x00008000) != 0)) {
        result.enableSpokenEmojis_ =
            enableSpokenEmojisBuilder_ == null
                ? enableSpokenEmojis_
                : enableSpokenEmojisBuilder_.build();
        to_bitField0_ |= 0x00000008;
      }
      if (((from_bitField0_ & 0x00010000) != 0)) {
        result.diarizationConfig_ =
            diarizationConfigBuilder_ == null
                ? diarizationConfig_
                : diarizationConfigBuilder_.build();
        to_bitField0_ |= 0x00000010;
      }
      if (((from_bitField0_ & 0x00020000) != 0)) {
        result.metadata_ = metadataBuilder_ == null ? metadata_ : metadataBuilder_.build();
        to_bitField0_ |= 0x00000020;
      }
      if (((from_bitField0_ & 0x00040000) != 0)) {
        result.model_ = model_;
      }
      if (((from_bitField0_ & 0x00080000) != 0)) {
        result.useEnhanced_ = useEnhanced_;
      }
      result.bitField0_ |= to_bitField0_;
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.speech.v1.RecognitionConfig) {
        return mergeFrom((com.google.cloud.speech.v1.RecognitionConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) {
      if (other == com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance()) return this;
      if (other.encoding_ != 0) {
        setEncodingValue(other.getEncodingValue());
      }
      if (other.getSampleRateHertz() != 0) {
        setSampleRateHertz(other.getSampleRateHertz());
      }
      if (other.getAudioChannelCount() != 0) {
        setAudioChannelCount(other.getAudioChannelCount());
      }
      if (other.getEnableSeparateRecognitionPerChannel() != false) {
        setEnableSeparateRecognitionPerChannel(other.getEnableSeparateRecognitionPerChannel());
      }
      if (!other.getLanguageCode().isEmpty()) {
        languageCode_ = other.languageCode_;
        bitField0_ |= 0x00000010;
        onChanged();
      }
      if (!other.alternativeLanguageCodes_.isEmpty()) {
        if (alternativeLanguageCodes_.isEmpty()) {
          alternativeLanguageCodes_ = other.alternativeLanguageCodes_;
          bitField0_ |= 0x00000020;
        } else {
          ensureAlternativeLanguageCodesIsMutable();
          alternativeLanguageCodes_.addAll(other.alternativeLanguageCodes_);
        }
        onChanged();
      }
      if (other.getMaxAlternatives() != 0) {
        setMaxAlternatives(other.getMaxAlternatives());
      }
      if (other.getProfanityFilter() != false) {
        setProfanityFilter(other.getProfanityFilter());
      }
      if (other.hasAdaptation()) {
        mergeAdaptation(other.getAdaptation());
      }
      if (other.hasTranscriptNormalization()) {
        mergeTranscriptNormalization(other.getTranscriptNormalization());
      }
      if (speechContextsBuilder_ == null) {
        if (!other.speechContexts_.isEmpty()) {
          if (speechContexts_.isEmpty()) {
            speechContexts_ = other.speechContexts_;
            bitField0_ = (bitField0_ & ~0x00000400);
          } else {
            ensureSpeechContextsIsMutable();
            speechContexts_.addAll(other.speechContexts_);
          }
          onChanged();
        }
      } else {
        if (!other.speechContexts_.isEmpty()) {
          if (speechContextsBuilder_.isEmpty()) {
            speechContextsBuilder_.dispose();
            speechContextsBuilder_ = null;
            speechContexts_ = other.speechContexts_;
            bitField0_ = (bitField0_ & ~0x00000400);
            speechContextsBuilder_ =
                com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
                    ? getSpeechContextsFieldBuilder()
                    : null;
          } else {
            speechContextsBuilder_.addAllMessages(other.speechContexts_);
          }
        }
      }
      if (other.getEnableWordTimeOffsets() != false) {
        setEnableWordTimeOffsets(other.getEnableWordTimeOffsets());
      }
      if (other.getEnableWordConfidence() != false) {
        setEnableWordConfidence(other.getEnableWordConfidence());
      }
      if (other.getEnableAutomaticPunctuation() != false) {
        setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation());
      }
      if (other.hasEnableSpokenPunctuation()) {
        mergeEnableSpokenPunctuation(other.getEnableSpokenPunctuation());
      }
      if (other.hasEnableSpokenEmojis()) {
        mergeEnableSpokenEmojis(other.getEnableSpokenEmojis());
      }
      if (other.hasDiarizationConfig()) {
        mergeDiarizationConfig(other.getDiarizationConfig());
      }
      if (other.hasMetadata()) {
        mergeMetadata(other.getMetadata());
      }
      if (!other.getModel().isEmpty()) {
        model_ = other.model_;
        bitField0_ |= 0x00040000;
        onChanged();
      }
      if (other.getUseEnhanced() != false) {
        setUseEnhanced(other.getUseEnhanced());
      }
      this.mergeUnknownFields(other.getUnknownFields());
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      if (extensionRegistry == null) {
        throw new java.lang.NullPointerException();
      }
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 8:
              {
                encoding_ = input.readEnum();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
            case 16:
              {
                sampleRateHertz_ = input.readInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
            case 26:
              {
                languageCode_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00000010;
                break;
              } // case 26
            case 32:
              {
                maxAlternatives_ = input.readInt32();
                bitField0_ |= 0x00000040;
                break;
              } // case 32
            case 40:
              {
                profanityFilter_ = input.readBool();
                bitField0_ |= 0x00000080;
                break;
              } // case 40
            case 50:
              {
                com.google.cloud.speech.v1.SpeechContext m =
                    input.readMessage(
                        com.google.cloud.speech.v1.SpeechContext.parser(), extensionRegistry);
                if (speechContextsBuilder_ == null) {
                  ensureSpeechContextsIsMutable();
                  speechContexts_.add(m);
                } else {
                  speechContextsBuilder_.addMessage(m);
                }
                break;
              } // case 50
            case 56:
              {
                audioChannelCount_ = input.readInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 56
            case 64:
              {
                enableWordTimeOffsets_ = input.readBool();
                bitField0_ |= 0x00000800;
                break;
              } // case 64
            case 74:
              {
                input.readMessage(getMetadataFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00020000;
                break;
              } // case 74
            case 88:
              {
                enableAutomaticPunctuation_ = input.readBool();
                bitField0_ |= 0x00002000;
                break;
              } // case 88
            case 96:
              {
                enableSeparateRecognitionPerChannel_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 96
            case 106:
              {
                model_ = input.readStringRequireUtf8();
                bitField0_ |= 0x00040000;
                break;
              } // case 106
            case 112:
              {
                useEnhanced_ = input.readBool();
                bitField0_ |= 0x00080000;
                break;
              } // case 112
            case 120:
              {
                enableWordConfidence_ = input.readBool();
                bitField0_ |= 0x00001000;
                break;
              } // case 120
            case 146:
              {
                java.lang.String s = input.readStringRequireUtf8();
                ensureAlternativeLanguageCodesIsMutable();
                alternativeLanguageCodes_.add(s);
                break;
              } // case 146
            case 154:
              {
                input.readMessage(
                    getDiarizationConfigFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00010000;
                break;
              } // case 154
            case 162:
              {
                input.readMessage(getAdaptationFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000100;
                break;
              } // case 162
            case 178:
              {
                input.readMessage(
                    getEnableSpokenPunctuationFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00004000;
                break;
              } // case 178
            case 186:
              {
                input.readMessage(
                    getEnableSpokenEmojisFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00008000;
                break;
              } // case 186
            case 194:
              {
                input.readMessage(
                    getTranscriptNormalizationFieldBuilder().getBuilder(), extensionRegistry);
                bitField0_ |= 0x00000200;
                break;
              } // case 194
            default:
              {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
          } // switch (tag)
        } // while (!done)
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.unwrapIOException();
      } finally {
        onChanged();
      } // finally
      return this;
    }

    private int bitField0_;

    private int encoding_ = 0;
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
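     *
     * Illustrative sketch (not part of the generated file): the wire value is a
     * plain int, so the enum getter and this numeric getter agree. LINEAR16 is
     * an assumed example value.
     *
     *   Builder b = RecognitionConfig.newBuilder()
     *       .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16);
     *   assert b.getEncodingValue()
     *       == RecognitionConfig.AudioEncoding.LINEAR16.getNumber();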
     *
     * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;
     *
     * @return The enum numeric value on the wire for encoding.
     */
    @java.lang.Override
    public int getEncodingValue() {
      return encoding_;
    }
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
     *
     * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;
     *
     * @param value The enum numeric value on the wire for encoding to set.
     * @return This builder for chaining.
     */
    public Builder setEncodingValue(int value) {
      encoding_ = value;
      bitField0_ |= 0x00000001;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
     *
     * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;
     *
     * @return The encoding.
     */
    @java.lang.Override
    public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() {
      com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding result =
          com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.forNumber(encoding_);
      return result == null
          ? com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
          : result;
    }
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
     *
     * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;
     *
     * @param value The encoding to set.
     * @return This builder for chaining.
     */
    public Builder setEncoding(com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000001;
      encoding_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
     *
     * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1;
     *
     * @return This builder for chaining.
     */
    public Builder clearEncoding() {
      bitField0_ = (bitField0_ & ~0x00000001);
      encoding_ = 0;
      onChanged();
      return this;
    }

    private int sampleRateHertz_;
    /**
     *
     *
     *
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for FLAC and WAV audio files, but is
     * required for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
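     *
     * Illustrative sketch (not part of the generated file): declaring the rate
     * that matches the actual audio, here assumed to be 16 kHz PCM.
     *
     *   RecognitionConfig.Builder b =
     *       RecognitionConfig.newBuilder().setSampleRateHertz(16000);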
     *
     * int32 sample_rate_hertz = 2;
     *
     * @return The sampleRateHertz.
     */
    @java.lang.Override
    public int getSampleRateHertz() {
      return sampleRateHertz_;
    }
    /**
     *
     *
     *
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for FLAC and WAV audio files, but is
     * required for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
     *
     * int32 sample_rate_hertz = 2;
     *
     * @param value The sampleRateHertz to set.
     * @return This builder for chaining.
     */
    public Builder setSampleRateHertz(int value) {
      sampleRateHertz_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for FLAC and WAV audio files, but is
     * required for all other audio formats. For details, see
     * [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
     * 
     *
     * int32 sample_rate_hertz = 2;
     *
     * @return This builder for chaining.
     */
    public Builder clearSampleRateHertz() {
      bitField0_ = (bitField0_ & ~0x00000002);
      sampleRateHertz_ = 0;
      onChanged();
      return this;
    }

    private int audioChannelCount_;
    /**
     *
     *
     *
     * The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
     * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to 'true'.
     * 
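     *
     * Illustrative sketch (not part of the generated file): two-channel audio
     * where each channel should be transcribed independently; the channel count
     * is an assumed example value.
     *
     *   RecognitionConfig.Builder b = RecognitionConfig.newBuilder()
     *       .setAudioChannelCount(2)
     *       .setEnableSeparateRecognitionPerChannel(true);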
     *
     * int32 audio_channel_count = 7;
     *
     * @return The audioChannelCount.
     */
    @java.lang.Override
    public int getAudioChannelCount() {
      return audioChannelCount_;
    }
    /**
     *
     *
     *
     * The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
     * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to 'true'.
     * 
     *
     * int32 audio_channel_count = 7;
     *
     * @param value The audioChannelCount to set.
     * @return This builder for chaining.
     */
    public Builder setAudioChannelCount(int value) {
      audioChannelCount_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`.
     * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to 'true'.
     * 
     *
     * int32 audio_channel_count = 7;
     *
     * @return This builder for chaining.
     */
    public Builder clearAudioChannelCount() {
      bitField0_ = (bitField0_ & ~0x00000004);
      audioChannelCount_ = 0;
      onChanged();
      return this;
    }

    private boolean enableSeparateRecognitionPerChannel_;
    /**
     *
     *
     *
     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * 
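     *
     * Illustrative consumer-side sketch (not part of the generated file): when
     * this flag is true, each result reports the channel it came from. The
     * response variable is an assumed RecognizeResponse.
     *
     *   for (com.google.cloud.speech.v1.SpeechRecognitionResult r : response.getResultsList()) {
     *     int channel = r.getChannelTag();
     *   }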
     *
     * bool enable_separate_recognition_per_channel = 12;
     *
     * @return The enableSeparateRecognitionPerChannel.
     */
    @java.lang.Override
    public boolean getEnableSeparateRecognitionPerChannel() {
      return enableSeparateRecognitionPerChannel_;
    }
    /**
     *
     *
     *
     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * 
     *
     * bool enable_separate_recognition_per_channel = 12;
     *
     * @param value The enableSeparateRecognitionPerChannel to set.
     * @return This builder for chaining.
     */
    public Builder setEnableSeparateRecognitionPerChannel(boolean value) {
      enableSeparateRecognitionPerChannel_ = value;
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * 
     *
     * bool enable_separate_recognition_per_channel = 12;
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableSeparateRecognitionPerChannel() {
      bitField0_ = (bitField0_ & ~0x00000008);
      enableSeparateRecognitionPerChannel_ = false;
      onChanged();
      return this;
    }

    private java.lang.Object languageCode_ = "";
    /**
     *
     *
     *
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * 
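     *
     * Illustrative sketch (not part of the generated file): the code is a full
     * BCP-47 tag, so the region part matters ("en-US" vs. "en-GB"); "en-US" is
     * an assumed example value.
     *
     *   RecognitionConfig.Builder b =
     *       RecognitionConfig.newBuilder().setLanguageCode("en-US");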
     *
     * string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
     *
     * @return The languageCode.
     */
    public java.lang.String getLanguageCode() {
      java.lang.Object ref = languageCode_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        languageCode_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     *
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * 
     *
     * string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
     *
     * @return The bytes for languageCode.
     */
    public com.google.protobuf.ByteString getLanguageCodeBytes() {
      java.lang.Object ref = languageCode_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        languageCode_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     *
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * 
     *
     * string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
     *
     * @param value The languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCode(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      languageCode_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * 
     *
     * string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
     *
     * @return This builder for chaining.
     */
    public Builder clearLanguageCode() {
      languageCode_ = getDefaultInstance().getLanguageCode();
      bitField0_ = (bitField0_ & ~0x00000010);
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Required. The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes.
     * 
     *
     * string language_code = 3 [(.google.api.field_behavior) = REQUIRED];
     *
     * @param value The bytes for languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      languageCode_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }

    private com.google.protobuf.LazyStringArrayList alternativeLanguageCodes_ =
        com.google.protobuf.LazyStringArrayList.emptyList();

    private void ensureAlternativeLanguageCodesIsMutable() {
      if (!alternativeLanguageCodes_.isModifiable()) {
        alternativeLanguageCodes_ =
            new com.google.protobuf.LazyStringArrayList(alternativeLanguageCodes_);
      }
      bitField0_ |= 0x00000020;
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
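     *
     * Illustrative sketch (not part of the generated file): one primary
     * language plus up to three fallbacks; the specific tags are assumed
     * example values.
     *
     *   RecognitionConfig.Builder b = RecognitionConfig.newBuilder()
     *       .setLanguageCode("en-US")
     *       .addAlternativeLanguageCodes("es-US")
     *       .addAlternativeLanguageCodes("fr-CA");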
     *
     * repeated string alternative_language_codes = 18;
     *
     * @return A list containing the alternativeLanguageCodes.
     */
    public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() {
      alternativeLanguageCodes_.makeImmutable();
      return alternativeLanguageCodes_;
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @return The count of alternativeLanguageCodes.
     */
    public int getAlternativeLanguageCodesCount() {
      return alternativeLanguageCodes_.size();
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @param index The index of the element to return.
     * @return The alternativeLanguageCodes at the given index.
     */
    public java.lang.String getAlternativeLanguageCodes(int index) {
      return alternativeLanguageCodes_.get(index);
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @param index The index of the value to return.
     * @return The bytes of the alternativeLanguageCodes at the given index.
     */
    public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) {
      return alternativeLanguageCodes_.getByteString(index);
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @param index The index to set the value at.
     * @param value The alternativeLanguageCodes to set.
     * @return This builder for chaining.
     */
    public Builder setAlternativeLanguageCodes(int index, java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureAlternativeLanguageCodesIsMutable();
      alternativeLanguageCodes_.set(index, value);
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @param value The alternativeLanguageCodes to add.
     * @return This builder for chaining.
     */
    public Builder addAlternativeLanguageCodes(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      ensureAlternativeLanguageCodesIsMutable();
      alternativeLanguageCodes_.add(value);
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @param values The alternativeLanguageCodes to add.
     * @return This builder for chaining.
     */
    public Builder addAllAlternativeLanguageCodes(java.lang.Iterable<java.lang.String> values) {
      ensureAlternativeLanguageCodesIsMutable();
      com.google.protobuf.AbstractMessageLite.Builder.addAll(values, alternativeLanguageCodes_);
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @return This builder for chaining.
     */
    public Builder clearAlternativeLanguageCodes() {
      alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.emptyList();
      bitField0_ = (bitField0_ & ~0x00000020);
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language
     * Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
     * of the currently supported language codes. If alternative languages are
     * listed, recognition result will contain recognition in the most likely
     * language detected including the main language_code. The recognition result
     * will include the language tag of the language detected in the audio. Note:
     * This feature is only supported for Voice Command and Voice Search use cases
     * and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
     *
     * repeated string alternative_language_codes = 18;
     *
     * @param value The bytes of the alternativeLanguageCodes to add.
     * @return This builder for chaining.
     */
    public Builder addAlternativeLanguageCodesBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      ensureAlternativeLanguageCodesIsMutable();
      alternativeLanguageCodes_.add(value);
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }

    private int maxAlternatives_;
    /**
     *
     *
     *
     * Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, will return a maximum of one.
     * 
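     *
     * Illustrative sketch (not part of the generated file): requesting up to
     * three hypotheses per result; the server may still return fewer. The value
     * 3 is an assumed example.
     *
     *   RecognitionConfig.Builder b =
     *       RecognitionConfig.newBuilder().setMaxAlternatives(3);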
     *
     * int32 max_alternatives = 4;
     *
     * @return The maxAlternatives.
     */
    @java.lang.Override
    public int getMaxAlternatives() {
      return maxAlternatives_;
    }
    /**
     *
     *
     *
     * Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, will return a maximum of one.
     * 
     *
     * int32 max_alternatives = 4;
     *
     * @param value The maxAlternatives to set.
     * @return This builder for chaining.
     */
    public Builder setMaxAlternatives(int value) {
      maxAlternatives_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, will return a maximum of one.
     * 
     *
     * int32 max_alternatives = 4;
     *
     * @return This builder for chaining.
     */
    public Builder clearMaxAlternatives() {
      bitField0_ = (bitField0_ & ~0x00000040);
      maxAlternatives_ = 0;
      onChanged();
      return this;
    }

    private boolean profanityFilter_;
    /**
     *
     *
     *
     * If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
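     *
     * Illustrative sketch (not part of the generated file): opting in to
     * masking, so a filtered word is returned as "f***".
     *
     *   RecognitionConfig.Builder b =
     *       RecognitionConfig.newBuilder().setProfanityFilter(true);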
     *
     * bool profanity_filter = 5;
     *
     * @return The profanityFilter.
     */
    @java.lang.Override
    public boolean getProfanityFilter() {
      return profanityFilter_;
    }
    /**
     *
     *
     *
     * If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
     *
     * bool profanity_filter = 5;
     *
     * @param value The profanityFilter to set.
     * @return This builder for chaining.
     */
    public Builder setProfanityFilter(boolean value) {
      profanityFilter_ = value;
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
     *
     * bool profanity_filter = 5;
     *
     * @return This builder for chaining.
     */
    public Builder clearProfanityFilter() {
      bitField0_ = (bitField0_ & ~0x00000080);
      profanityFilter_ = false;
      onChanged();
      return this;
    }

    private com.google.cloud.speech.v1.SpeechAdaptation adaptation_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.SpeechAdaptation,
            com.google.cloud.speech.v1.SpeechAdaptation.Builder,
            com.google.cloud.speech.v1.SpeechAdaptationOrBuilder>
        adaptationBuilder_;
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
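     *
     * Illustrative sketch (not part of the generated file): referencing an
     * existing PhraseSet resource by name, assuming the v1 SpeechAdaptation
     * shape; the resource path is a placeholder.
     *
     *   com.google.cloud.speech.v1.SpeechAdaptation adaptation =
     *       com.google.cloud.speech.v1.SpeechAdaptation.newBuilder()
     *           .addPhraseSetReferences(
     *               "projects/my-project/locations/global/phraseSets/my-phrase-set")
     *           .build();
     *   RecognitionConfig.Builder b =
     *       RecognitionConfig.newBuilder().setAdaptation(adaptation);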
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     *
     * @return Whether the adaptation field is set.
     */
    public boolean hasAdaptation() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     *
     * @return The adaptation.
     */
    public com.google.cloud.speech.v1.SpeechAdaptation getAdaptation() {
      if (adaptationBuilder_ == null) {
        return adaptation_ == null
            ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()
            : adaptation_;
      } else {
        return adaptationBuilder_.getMessage();
      }
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    public Builder setAdaptation(com.google.cloud.speech.v1.SpeechAdaptation value) {
      if (adaptationBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        adaptation_ = value;
      } else {
        adaptationBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    public Builder setAdaptation(
        com.google.cloud.speech.v1.SpeechAdaptation.Builder builderForValue) {
      if (adaptationBuilder_ == null) {
        adaptation_ = builderForValue.build();
      } else {
        adaptationBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000100;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    public Builder mergeAdaptation(com.google.cloud.speech.v1.SpeechAdaptation value) {
      if (adaptationBuilder_ == null) {
        if (((bitField0_ & 0x00000100) != 0)
            && adaptation_ != null
            && adaptation_ != com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()) {
          getAdaptationBuilder().mergeFrom(value);
        } else {
          adaptation_ = value;
        }
      } else {
        adaptationBuilder_.mergeFrom(value);
      }
      if (adaptation_ != null) {
        bitField0_ |= 0x00000100;
        onChanged();
      }
      return this;
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    public Builder clearAdaptation() {
      bitField0_ = (bitField0_ & ~0x00000100);
      adaptation_ = null;
      if (adaptationBuilder_ != null) {
        adaptationBuilder_.dispose();
        adaptationBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    public com.google.cloud.speech.v1.SpeechAdaptation.Builder getAdaptationBuilder() {
      bitField0_ |= 0x00000100;
      onChanged();
      return getAdaptationFieldBuilder().getBuilder();
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    public com.google.cloud.speech.v1.SpeechAdaptationOrBuilder getAdaptationOrBuilder() {
      if (adaptationBuilder_ != null) {
        return adaptationBuilder_.getMessageOrBuilder();
      } else {
        return adaptation_ == null
            ? com.google.cloud.speech.v1.SpeechAdaptation.getDefaultInstance()
            : adaptation_;
      }
    }
    /**
     *
     *
     *
     * Speech adaptation configuration improves the accuracy of speech
     * recognition. For more information, see the [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation)
     * documentation.
     * When speech adaptation is set it supersedes the `speech_contexts` field.
     * 
     *
     * .google.cloud.speech.v1.SpeechAdaptation adaptation = 20;
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.SpeechAdaptation,
            com.google.cloud.speech.v1.SpeechAdaptation.Builder,
            com.google.cloud.speech.v1.SpeechAdaptationOrBuilder>
        getAdaptationFieldBuilder() {
      if (adaptationBuilder_ == null) {
        adaptationBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1.SpeechAdaptation,
                com.google.cloud.speech.v1.SpeechAdaptation.Builder,
                com.google.cloud.speech.v1.SpeechAdaptationOrBuilder>(
                getAdaptation(), getParentForChildren(), isClean());
        adaptation_ = null;
      }
      return adaptationBuilder_;
    }

    private com.google.cloud.speech.v1.TranscriptNormalization transcriptNormalization_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.TranscriptNormalization,
            com.google.cloud.speech.v1.TranscriptNormalization.Builder,
            com.google.cloud.speech.v1.TranscriptNormalizationOrBuilder>
        transcriptNormalizationBuilder_;
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
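     *
     * Illustrative sketch (not part of the generated file): replacing one
     * phrase with another in the transcript, assuming the v1
     * TranscriptNormalization.Entry shape; the search/replace strings are
     * example values.
     *
     *   com.google.cloud.speech.v1.TranscriptNormalization norm =
     *       com.google.cloud.speech.v1.TranscriptNormalization.newBuilder()
     *           .addEntries(
     *               com.google.cloud.speech.v1.TranscriptNormalization.Entry.newBuilder()
     *                   .setSearch("cloud speech")
     *                   .setReplace("Cloud Speech"))
     *           .build();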
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     *
     * @return Whether the transcriptNormalization field is set.
     */
    public boolean hasTranscriptNormalization() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     *
     * @return The transcriptNormalization.
     */
    public com.google.cloud.speech.v1.TranscriptNormalization getTranscriptNormalization() {
      if (transcriptNormalizationBuilder_ == null) {
        return transcriptNormalization_ == null
            ? com.google.cloud.speech.v1.TranscriptNormalization.getDefaultInstance()
            : transcriptNormalization_;
      } else {
        return transcriptNormalizationBuilder_.getMessage();
      }
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    public Builder setTranscriptNormalization(
        com.google.cloud.speech.v1.TranscriptNormalization value) {
      if (transcriptNormalizationBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        transcriptNormalization_ = value;
      } else {
        transcriptNormalizationBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000200;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    public Builder setTranscriptNormalization(
        com.google.cloud.speech.v1.TranscriptNormalization.Builder builderForValue) {
      if (transcriptNormalizationBuilder_ == null) {
        transcriptNormalization_ = builderForValue.build();
      } else {
        transcriptNormalizationBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000200;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    public Builder mergeTranscriptNormalization(
        com.google.cloud.speech.v1.TranscriptNormalization value) {
      if (transcriptNormalizationBuilder_ == null) {
        if (((bitField0_ & 0x00000200) != 0)
            && transcriptNormalization_ != null
            && transcriptNormalization_
                != com.google.cloud.speech.v1.TranscriptNormalization.getDefaultInstance()) {
          getTranscriptNormalizationBuilder().mergeFrom(value);
        } else {
          transcriptNormalization_ = value;
        }
      } else {
        transcriptNormalizationBuilder_.mergeFrom(value);
      }
      if (transcriptNormalization_ != null) {
        bitField0_ |= 0x00000200;
        onChanged();
      }
      return this;
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    public Builder clearTranscriptNormalization() {
      bitField0_ = (bitField0_ & ~0x00000200);
      transcriptNormalization_ = null;
      if (transcriptNormalizationBuilder_ != null) {
        transcriptNormalizationBuilder_.dispose();
        transcriptNormalizationBuilder_ = null;
      }
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    public com.google.cloud.speech.v1.TranscriptNormalization.Builder
        getTranscriptNormalizationBuilder() {
      bitField0_ |= 0x00000200;
      onChanged();
      return getTranscriptNormalizationFieldBuilder().getBuilder();
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    public com.google.cloud.speech.v1.TranscriptNormalizationOrBuilder
        getTranscriptNormalizationOrBuilder() {
      if (transcriptNormalizationBuilder_ != null) {
        return transcriptNormalizationBuilder_.getMessageOrBuilder();
      } else {
        return transcriptNormalization_ == null
            ? com.google.cloud.speech.v1.TranscriptNormalization.getDefaultInstance()
            : transcriptNormalization_;
      }
    }
    /**
     *
     *
     *
     * Optional. Use transcription normalization to automatically replace parts of
     * the transcript with phrases of your choosing. For StreamingRecognize, this
     * normalization only applies to stable partial transcripts (stability > 0.8)
     * and final transcripts.
     * 
     *
     * .google.cloud.speech.v1.TranscriptNormalization transcript_normalization = 24 [(.google.api.field_behavior) = OPTIONAL];
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.TranscriptNormalization,
            com.google.cloud.speech.v1.TranscriptNormalization.Builder,
            com.google.cloud.speech.v1.TranscriptNormalizationOrBuilder>
        getTranscriptNormalizationFieldBuilder() {
      if (transcriptNormalizationBuilder_ == null) {
        transcriptNormalizationBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1.TranscriptNormalization,
                com.google.cloud.speech.v1.TranscriptNormalization.Builder,
                com.google.cloud.speech.v1.TranscriptNormalizationOrBuilder>(
                getTranscriptNormalization(), getParentForChildren(), isClean());
        transcriptNormalization_ = null;
      }
      return transcriptNormalizationBuilder_;
    }

    private java.util.List<com.google.cloud.speech.v1.SpeechContext> speechContexts_ =
        java.util.Collections.emptyList();

    private void ensureSpeechContextsIsMutable() {
      if (!((bitField0_ & 0x00000400) != 0)) {
        speechContexts_ =
            new java.util.ArrayList<com.google.cloud.speech.v1.SpeechContext>(speechContexts_);
        bitField0_ |= 0x00000400;
      }
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.speech.v1.SpeechContext,
            com.google.cloud.speech.v1.SpeechContext.Builder,
            com.google.cloud.speech.v1.SpeechContextOrBuilder>
        speechContextsBuilder_;
    /**
     *
     *
     *
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
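     *
     * Editor's example (illustrative only): hinting the recognizer toward
     * phrases you expect to occur.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .addSpeechContexts(
     *               SpeechContext.newBuilder().addPhrases("weather forecast"))
     *           .build();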
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public java.util.List<com.google.cloud.speech.v1.SpeechContext> getSpeechContextsList() {
      if (speechContextsBuilder_ == null) {
        return java.util.Collections.unmodifiableList(speechContexts_);
      } else {
        return speechContextsBuilder_.getMessageList();
      }
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public int getSpeechContextsCount() {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.size();
      } else {
        return speechContextsBuilder_.getCount();
      }
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.get(index);
      } else {
        return speechContextsBuilder_.getMessage(index);
      }
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder setSpeechContexts(int index, com.google.cloud.speech.v1.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.set(index, value);
        onChanged();
      } else {
        speechContextsBuilder_.setMessage(index, value);
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder setSpeechContexts(
        int index, com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.set(index, builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.setMessage(index, builderForValue.build());
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(com.google.cloud.speech.v1.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.add(value);
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(value);
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(int index, com.google.cloud.speech.v1.SpeechContext value) {
      if (speechContextsBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureSpeechContextsIsMutable();
        speechContexts_.add(index, value);
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(index, value);
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(
        com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.add(builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(builderForValue.build());
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addSpeechContexts(
        int index, com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.add(index, builderForValue.build());
        onChanged();
      } else {
        speechContextsBuilder_.addMessage(index, builderForValue.build());
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder addAllSpeechContexts(
        java.lang.Iterable<? extends com.google.cloud.speech.v1.SpeechContext> values) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
        onChanged();
      } else {
        speechContextsBuilder_.addAllMessages(values);
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder clearSpeechContexts() {
      if (speechContextsBuilder_ == null) {
        speechContexts_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000400);
        onChanged();
      } else {
        speechContextsBuilder_.clear();
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public Builder removeSpeechContexts(int index) {
      if (speechContextsBuilder_ == null) {
        ensureSpeechContextsIsMutable();
        speechContexts_.remove(index);
        onChanged();
      } else {
        speechContextsBuilder_.remove(index);
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1.SpeechContext.Builder getSpeechContextsBuilder(int index) {
      return getSpeechContextsFieldBuilder().getBuilder(index);
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
        int index) {
      if (speechContextsBuilder_ == null) {
        return speechContexts_.get(index);
      } else {
        return speechContextsBuilder_.getMessageOrBuilder(index);
      }
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public java.util.List<? extends com.google.cloud.speech.v1.SpeechContextOrBuilder>
        getSpeechContextsOrBuilderList() {
      if (speechContextsBuilder_ != null) {
        return speechContextsBuilder_.getMessageOrBuilderList();
      } else {
        return java.util.Collections.unmodifiableList(speechContexts_);
      }
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder() {
      return getSpeechContextsFieldBuilder()
          .addBuilder(com.google.cloud.speech.v1.SpeechContext.getDefaultInstance());
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder(int index) {
      return getSpeechContextsFieldBuilder()
          .addBuilder(index, com.google.cloud.speech.v1.SpeechContext.getDefaultInstance());
    }

    /**
     *
     *
     * <pre>
     * Array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see
     * [speech
     * adaptation](https://cloud.google.com/speech-to-text/docs/adaptation).
     * </pre>
     *
     * <code>repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6;</code>
     */
    public java.util.List<com.google.cloud.speech.v1.SpeechContext.Builder>
        getSpeechContextsBuilderList() {
      return getSpeechContextsFieldBuilder().getBuilderList();
    }

    private com.google.protobuf.RepeatedFieldBuilderV3<
            com.google.cloud.speech.v1.SpeechContext,
            com.google.cloud.speech.v1.SpeechContext.Builder,
            com.google.cloud.speech.v1.SpeechContextOrBuilder>
        getSpeechContextsFieldBuilder() {
      if (speechContextsBuilder_ == null) {
        speechContextsBuilder_ =
            new com.google.protobuf.RepeatedFieldBuilderV3<
                com.google.cloud.speech.v1.SpeechContext,
                com.google.cloud.speech.v1.SpeechContext.Builder,
                com.google.cloud.speech.v1.SpeechContextOrBuilder>(
                speechContexts_,
                ((bitField0_ & 0x00000400) != 0),
                getParentForChildren(),
                isClean());
        speechContexts_ = null;
      }
      return speechContextsBuilder_;
    }

    private boolean enableWordTimeOffsets_;

    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
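     *
     * Editor's example (illustrative only): with this flag set, each WordInfo
     * in the response carries getStartTime()/getEndTime().
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder().setEnableWordTimeOffsets(true).build();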
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 8;</code>
     *
     * @return The enableWordTimeOffsets.
     */
    @java.lang.Override
    public boolean getEnableWordTimeOffsets() {
      return enableWordTimeOffsets_;
    }

    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 8;</code>
     *
     * @param value The enableWordTimeOffsets to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordTimeOffsets(boolean value) {
      enableWordTimeOffsets_ = value;
      bitField0_ |= 0x00000800;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * </pre>
     *
     * <code>bool enable_word_time_offsets = 8;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordTimeOffsets() {
      bitField0_ = (bitField0_ & ~0x00000800);
      enableWordTimeOffsets_ = false;
      onChanged();
      return this;
    }

    private boolean enableWordConfidence_;

    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
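     *
     * Editor's example (illustrative only): with this flag set, each WordInfo
     * in the response carries getConfidence().
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder().setEnableWordConfidence(true).build();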
     * </pre>
     *
     * <code>bool enable_word_confidence = 15;</code>
     *
     * @return The enableWordConfidence.
     */
    @java.lang.Override
    public boolean getEnableWordConfidence() {
      return enableWordConfidence_;
    }

    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 15;</code>
     *
     * @param value The enableWordConfidence to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordConfidence(boolean value) {
      enableWordConfidence_ = value;
      bitField0_ |= 0x00001000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * </pre>
     *
     * <code>bool enable_word_confidence = 15;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordConfidence() {
      bitField0_ = (bitField0_ & ~0x00001000);
      enableWordConfidence_ = false;
      onChanged();
      return this;
    }

    private boolean enableAutomaticPunctuation_;

    /**
     *
     *
     * <pre>
     * If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
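     *
     * Editor's example (illustrative only):
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setLanguageCode("en-US")
     *           .setEnableAutomaticPunctuation(true)
     *           .build();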
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 11;</code>
     *
     * @return The enableAutomaticPunctuation.
     */
    @java.lang.Override
    public boolean getEnableAutomaticPunctuation() {
      return enableAutomaticPunctuation_;
    }

    /**
     *
     *
     * <pre>
     * If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 11;</code>
     *
     * @param value The enableAutomaticPunctuation to set.
     * @return This builder for chaining.
     */
    public Builder setEnableAutomaticPunctuation(boolean value) {
      enableAutomaticPunctuation_ = value;
      bitField0_ |= 0x00002000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * </pre>
     *
     * <code>bool enable_automatic_punctuation = 11;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableAutomaticPunctuation() {
      bitField0_ = (bitField0_ & ~0x00002000);
      enableAutomaticPunctuation_ = false;
      onChanged();
      return this;
    }

    private com.google.protobuf.BoolValue enableSpokenPunctuation_;

    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        enableSpokenPunctuationBuilder_;

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
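     *
     * Editor's example (illustrative only): note the google.protobuf.BoolValue
     * wrapper, which lets the service distinguish an explicit 'false' from an
     * unset field.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setEnableSpokenPunctuation(com.google.protobuf.BoolValue.of(true))
     *           .build();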
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     *
     * @return Whether the enableSpokenPunctuation field is set.
     */
    public boolean hasEnableSpokenPunctuation() {
      return ((bitField0_ & 0x00004000) != 0);
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     *
     * @return The enableSpokenPunctuation.
     */
    public com.google.protobuf.BoolValue getEnableSpokenPunctuation() {
      if (enableSpokenPunctuationBuilder_ == null) {
        return enableSpokenPunctuation_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenPunctuation_;
      } else {
        return enableSpokenPunctuationBuilder_.getMessage();
      }
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder setEnableSpokenPunctuation(com.google.protobuf.BoolValue value) {
      if (enableSpokenPunctuationBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        enableSpokenPunctuation_ = value;
      } else {
        enableSpokenPunctuationBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00004000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder setEnableSpokenPunctuation(
        com.google.protobuf.BoolValue.Builder builderForValue) {
      if (enableSpokenPunctuationBuilder_ == null) {
        enableSpokenPunctuation_ = builderForValue.build();
      } else {
        enableSpokenPunctuationBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00004000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder mergeEnableSpokenPunctuation(com.google.protobuf.BoolValue value) {
      if (enableSpokenPunctuationBuilder_ == null) {
        if (((bitField0_ & 0x00004000) != 0)
            && enableSpokenPunctuation_ != null
            && enableSpokenPunctuation_ != com.google.protobuf.BoolValue.getDefaultInstance()) {
          getEnableSpokenPunctuationBuilder().mergeFrom(value);
        } else {
          enableSpokenPunctuation_ = value;
        }
      } else {
        enableSpokenPunctuationBuilder_.mergeFrom(value);
      }
      if (enableSpokenPunctuation_ != null) {
        bitField0_ |= 0x00004000;
        onChanged();
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public Builder clearEnableSpokenPunctuation() {
      bitField0_ = (bitField0_ & ~0x00004000);
      enableSpokenPunctuation_ = null;
      if (enableSpokenPunctuationBuilder_ != null) {
        enableSpokenPunctuationBuilder_.dispose();
        enableSpokenPunctuationBuilder_ = null;
      }
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public com.google.protobuf.BoolValue.Builder getEnableSpokenPunctuationBuilder() {
      bitField0_ |= 0x00004000;
      onChanged();
      return getEnableSpokenPunctuationFieldBuilder().getBuilder();
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    public com.google.protobuf.BoolValueOrBuilder getEnableSpokenPunctuationOrBuilder() {
      if (enableSpokenPunctuationBuilder_ != null) {
        return enableSpokenPunctuationBuilder_.getMessageOrBuilder();
      } else {
        return enableSpokenPunctuation_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenPunctuation_;
      }
    }

    /**
     *
     *
     * <pre>
     * The spoken punctuation behavior for the call.
     * If not set, uses the default behavior based on the model of choice;
     * e.g. command_and_search will enable spoken punctuation by default.
     * If 'true', replaces spoken punctuation with the corresponding symbols in
     * the request. For example, "how are you question mark" becomes "how are
     * you?". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation
     * for support. If 'false', spoken punctuation is not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_punctuation = 22;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        getEnableSpokenPunctuationFieldBuilder() {
      if (enableSpokenPunctuationBuilder_ == null) {
        enableSpokenPunctuationBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.BoolValue,
                com.google.protobuf.BoolValue.Builder,
                com.google.protobuf.BoolValueOrBuilder>(
                getEnableSpokenPunctuation(), getParentForChildren(), isClean());
        enableSpokenPunctuation_ = null;
      }
      return enableSpokenPunctuationBuilder_;
    }

    private com.google.protobuf.BoolValue enableSpokenEmojis_;

    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        enableSpokenEmojisBuilder_;

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
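     *
     * Editor's example (illustrative only; same BoolValue wrapper pattern as
     * enable_spoken_punctuation):
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setEnableSpokenEmojis(com.google.protobuf.BoolValue.of(true))
     *           .build();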
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     *
     * @return Whether the enableSpokenEmojis field is set.
     */
    public boolean hasEnableSpokenEmojis() {
      return ((bitField0_ & 0x00008000) != 0);
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     *
     * @return The enableSpokenEmojis.
     */
    public com.google.protobuf.BoolValue getEnableSpokenEmojis() {
      if (enableSpokenEmojisBuilder_ == null) {
        return enableSpokenEmojis_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenEmojis_;
      } else {
        return enableSpokenEmojisBuilder_.getMessage();
      }
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue value) {
      if (enableSpokenEmojisBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        enableSpokenEmojis_ = value;
      } else {
        enableSpokenEmojisBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00008000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder setEnableSpokenEmojis(com.google.protobuf.BoolValue.Builder builderForValue) {
      if (enableSpokenEmojisBuilder_ == null) {
        enableSpokenEmojis_ = builderForValue.build();
      } else {
        enableSpokenEmojisBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00008000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder mergeEnableSpokenEmojis(com.google.protobuf.BoolValue value) {
      if (enableSpokenEmojisBuilder_ == null) {
        if (((bitField0_ & 0x00008000) != 0)
            && enableSpokenEmojis_ != null
            && enableSpokenEmojis_ != com.google.protobuf.BoolValue.getDefaultInstance()) {
          getEnableSpokenEmojisBuilder().mergeFrom(value);
        } else {
          enableSpokenEmojis_ = value;
        }
      } else {
        enableSpokenEmojisBuilder_.mergeFrom(value);
      }
      if (enableSpokenEmojis_ != null) {
        bitField0_ |= 0x00008000;
        onChanged();
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public Builder clearEnableSpokenEmojis() {
      bitField0_ = (bitField0_ & ~0x00008000);
      enableSpokenEmojis_ = null;
      if (enableSpokenEmojisBuilder_ != null) {
        enableSpokenEmojisBuilder_.dispose();
        enableSpokenEmojisBuilder_ = null;
      }
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public com.google.protobuf.BoolValue.Builder getEnableSpokenEmojisBuilder() {
      bitField0_ |= 0x00008000;
      onChanged();
      return getEnableSpokenEmojisFieldBuilder().getBuilder();
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    public com.google.protobuf.BoolValueOrBuilder getEnableSpokenEmojisOrBuilder() {
      if (enableSpokenEmojisBuilder_ != null) {
        return enableSpokenEmojisBuilder_.getMessageOrBuilder();
      } else {
        return enableSpokenEmojis_ == null
            ? com.google.protobuf.BoolValue.getDefaultInstance()
            : enableSpokenEmojis_;
      }
    }

    /**
     *
     *
     * <pre>
     * The spoken emoji behavior for the call.
     * If not set, uses the default behavior based on the model of choice.
     * If 'true', adds spoken emoji formatting for the request. This will replace
     * spoken emojis with the corresponding Unicode symbols in the final
     * transcript. If 'false', spoken emojis are not replaced.
     * </pre>
     *
     * <code>.google.protobuf.BoolValue enable_spoken_emojis = 23;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.protobuf.BoolValue,
            com.google.protobuf.BoolValue.Builder,
            com.google.protobuf.BoolValueOrBuilder>
        getEnableSpokenEmojisFieldBuilder() {
      if (enableSpokenEmojisBuilder_ == null) {
        enableSpokenEmojisBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.protobuf.BoolValue,
                com.google.protobuf.BoolValue.Builder,
                com.google.protobuf.BoolValueOrBuilder>(
                getEnableSpokenEmojis(), getParentForChildren(), isClean());
        enableSpokenEmojis_ = null;
      }
      return enableSpokenEmojisBuilder_;
    }

    private com.google.cloud.speech.v1.SpeakerDiarizationConfig diarizationConfig_;

    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.SpeakerDiarizationConfig,
            com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder,
            com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder>
        diarizationConfigBuilder_;

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
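     *
     * Editor's example (illustrative only; the SpeakerDiarizationConfig field
     * names are assumed from the same generated package):
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setDiarizationConfig(
     *               SpeakerDiarizationConfig.newBuilder()
     *                   .setEnableSpeakerDiarization(true)
     *                   .setMinSpeakerCount(2)
     *                   .setMaxSpeakerCount(6))
     *           .build();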
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     *
     * @return Whether the diarizationConfig field is set.
     */
    public boolean hasDiarizationConfig() {
      return ((bitField0_ & 0x00010000) != 0);
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     *
     * @return The diarizationConfig.
     */
    public com.google.cloud.speech.v1.SpeakerDiarizationConfig getDiarizationConfig() {
      if (diarizationConfigBuilder_ == null) {
        return diarizationConfig_ == null
            ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()
            : diarizationConfig_;
      } else {
        return diarizationConfigBuilder_.getMessage();
      }
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder setDiarizationConfig(com.google.cloud.speech.v1.SpeakerDiarizationConfig value) {
      if (diarizationConfigBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        diarizationConfig_ = value;
      } else {
        diarizationConfigBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00010000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder setDiarizationConfig(
        com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder builderForValue) {
      if (diarizationConfigBuilder_ == null) {
        diarizationConfig_ = builderForValue.build();
      } else {
        diarizationConfigBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00010000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder mergeDiarizationConfig(
        com.google.cloud.speech.v1.SpeakerDiarizationConfig value) {
      if (diarizationConfigBuilder_ == null) {
        if (((bitField0_ & 0x00010000) != 0)
            && diarizationConfig_ != null
            && diarizationConfig_
                != com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()) {
          getDiarizationConfigBuilder().mergeFrom(value);
        } else {
          diarizationConfig_ = value;
        }
      } else {
        diarizationConfigBuilder_.mergeFrom(value);
      }
      if (diarizationConfig_ != null) {
        bitField0_ |= 0x00010000;
        onChanged();
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public Builder clearDiarizationConfig() {
      bitField0_ = (bitField0_ & ~0x00010000);
      diarizationConfig_ = null;
      if (diarizationConfigBuilder_ != null) {
        diarizationConfigBuilder_.dispose();
        diarizationConfigBuilder_ = null;
      }
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder
        getDiarizationConfigBuilder() {
      bitField0_ |= 0x00010000;
      onChanged();
      return getDiarizationConfigFieldBuilder().getBuilder();
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    public com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder
        getDiarizationConfigOrBuilder() {
      if (diarizationConfigBuilder_ != null) {
        return diarizationConfigBuilder_.getMessageOrBuilder();
      } else {
        return diarizationConfig_ == null
            ? com.google.cloud.speech.v1.SpeakerDiarizationConfig.getDefaultInstance()
            : diarizationConfig_;
      }
    }

    /**
     *
     *
     * <pre>
     * Config to enable speaker diarization and set additional
     * parameters to make diarization better suited for your application.
     * Note: When this is enabled, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.SpeakerDiarizationConfig diarization_config = 19;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.SpeakerDiarizationConfig,
            com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder,
            com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder>
        getDiarizationConfigFieldBuilder() {
      if (diarizationConfigBuilder_ == null) {
        diarizationConfigBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1.SpeakerDiarizationConfig,
                com.google.cloud.speech.v1.SpeakerDiarizationConfig.Builder,
                com.google.cloud.speech.v1.SpeakerDiarizationConfigOrBuilder>(
                getDiarizationConfig(), getParentForChildren(), isClean());
        diarizationConfig_ = null;
      }
      return diarizationConfigBuilder_;
    }

    private com.google.cloud.speech.v1.RecognitionMetadata metadata_;

    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.RecognitionMetadata,
            com.google.cloud.speech.v1.RecognitionMetadata.Builder,
            com.google.cloud.speech.v1.RecognitionMetadataOrBuilder>
        metadataBuilder_;

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
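     *
     * Editor's example (illustrative only; the RecognitionMetadata builder and
     * its InteractionType enum are assumed from the same generated package):
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setMetadata(
     *               RecognitionMetadata.newBuilder()
     *                   .setInteractionType(
     *                       RecognitionMetadata.InteractionType.PHONE_CALL))
     *           .build();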
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     *
     * @return Whether the metadata field is set.
     */
    public boolean hasMetadata() {
      return ((bitField0_ & 0x00020000) != 0);
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     *
     * @return The metadata.
     */
    public com.google.cloud.speech.v1.RecognitionMetadata getMetadata() {
      if (metadataBuilder_ == null) {
        return metadata_ == null
            ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()
            : metadata_;
      } else {
        return metadataBuilder_.getMessage();
      }
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder setMetadata(com.google.cloud.speech.v1.RecognitionMetadata value) {
      if (metadataBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        metadata_ = value;
      } else {
        metadataBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00020000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder setMetadata(
        com.google.cloud.speech.v1.RecognitionMetadata.Builder builderForValue) {
      if (metadataBuilder_ == null) {
        metadata_ = builderForValue.build();
      } else {
        metadataBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00020000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder mergeMetadata(com.google.cloud.speech.v1.RecognitionMetadata value) {
      if (metadataBuilder_ == null) {
        if (((bitField0_ & 0x00020000) != 0)
            && metadata_ != null
            && metadata_ != com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()) {
          getMetadataBuilder().mergeFrom(value);
        } else {
          metadata_ = value;
        }
      } else {
        metadataBuilder_.mergeFrom(value);
      }
      if (metadata_ != null) {
        bitField0_ |= 0x00020000;
        onChanged();
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    public Builder clearMetadata() {
      bitField0_ = (bitField0_ & ~0x00020000);
      metadata_ = null;
      if (metadataBuilder_ != null) {
        metadataBuilder_.dispose();
        metadataBuilder_ = null;
      }
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    public com.google.cloud.speech.v1.RecognitionMetadata.Builder getMetadataBuilder() {
      bitField0_ |= 0x00020000;
      onChanged();
      return getMetadataFieldBuilder().getBuilder();
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    public com.google.cloud.speech.v1.RecognitionMetadataOrBuilder getMetadataOrBuilder() {
      if (metadataBuilder_ != null) {
        return metadataBuilder_.getMessageOrBuilder();
      } else {
        return metadata_ == null
            ? com.google.cloud.speech.v1.RecognitionMetadata.getDefaultInstance()
            : metadata_;
      }
    }

    /**
     *
     *
     * <pre>
     * Metadata regarding this request.
     * </pre>
     *
     * <code>.google.cloud.speech.v1.RecognitionMetadata metadata = 9;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.speech.v1.RecognitionMetadata,
            com.google.cloud.speech.v1.RecognitionMetadata.Builder,
            com.google.cloud.speech.v1.RecognitionMetadataOrBuilder>
        getMetadataFieldBuilder() {
      if (metadataBuilder_ == null) {
        metadataBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.speech.v1.RecognitionMetadata,
                com.google.cloud.speech.v1.RecognitionMetadata.Builder,
                com.google.cloud.speech.v1.RecognitionMetadataOrBuilder>(
                getMetadata(), getParentForChildren(), isClean());
        metadata_ = null;
      }
      return metadataBuilder_;
    }

    private java.lang.Object model_ = "";

    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_long</code></td>
     *     <td>Best for long form content like media or conversation.</td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_short</code></td>
     *     <td>Best for short form content like commands or single shot directed
     *     speech.</td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_conversation</code></td>
     *     <td>Best for audio that originated from a conversation between a
     *         medical provider and patient.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_dictation</code></td>
     *     <td>Best for audio that originated from dictation notes by a medical
     *         provider.</td>
     *   </tr>
     * </table>
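     *
     * Editor's example (illustrative only): selecting the phone_call model for
     * 8 kHz telephony audio.
     *
     *   RecognitionConfig config =
     *       RecognitionConfig.newBuilder()
     *           .setLanguageCode("en-US")
     *           .setModel("phone_call")
     *           .build();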
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @return The model.
     */
    public java.lang.String getModel() {
      java.lang.Object ref = model_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        model_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_long</code></td>
     *     <td>Best for long form content like media or conversation.</td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_short</code></td>
     *     <td>Best for short form content like commands or single shot directed
     *     speech.</td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_conversation</code></td>
     *     <td>Best for audio that originated from a conversation between a
     *         medical provider and patient.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_dictation</code></td>
     *     <td>Best for audio that originated from dictation notes by a medical
     *         provider.</td>
     *   </tr>
     * </table>
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @return The bytes for model.
     */
    public com.google.protobuf.ByteString getModelBytes() {
      java.lang.Object ref = model_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        model_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_long</code></td>
     *     <td>Best for long form content like media or conversation.</td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_short</code></td>
     *     <td>Best for short form content like commands or single shot directed
     *     speech.</td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_conversation</code></td>
     *     <td>Best for audio that originated from a conversation between a
     *         medical provider and patient.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_dictation</code></td>
     *     <td>Best for audio that originated from dictation notes by a medical
     *         provider.</td>
     *   </tr>
     * </table>
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @param value The model to set.
     * @return This builder for chaining.
     */
    public Builder setModel(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      model_ = value;
      bitField0_ |= 0x00040000;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_long</code></td>
     *     <td>Best for long form content like media or conversation.</td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_short</code></td>
     *     <td>Best for short form content like commands or single shot directed
     *     speech.</td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_conversation</code></td>
     *     <td>Best for audio that originated from a conversation between a
     *         medical provider and patient.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_dictation</code></td>
     *     <td>Best for audio that originated from dictation notes by a medical
     *         provider.</td>
     *   </tr>
     * </table>
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModel() {
      model_ = getDefaultInstance().getModel();
      bitField0_ = (bitField0_ & ~0x00040000);
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Which model to select for the given request. Select the model
     * best suited to your domain to get the best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_long</code></td>
     *     <td>Best for long form content like media or conversation.</td>
     *   </tr>
     *   <tr>
     *     <td><code>latest_short</code></td>
     *     <td>Best for short form content like commands or single shot directed
     *     speech.</td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_conversation</code></td>
     *     <td>Best for audio that originated from a conversation between a
     *         medical provider and patient.</td>
     *   </tr>
     *   <tr>
     *     <td><code>medical_dictation</code></td>
     *     <td>Best for audio that originated from dictation notes by a medical
     *         provider.</td>
     *   </tr>
     * </table>
     * </pre>
     *
     * <code>string model = 13;</code>
     *
     * @param value The bytes for model to set.
     * @return This builder for chaining.
     */
    public Builder setModelBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      model_ = value;
      bitField0_ |= 0x00040000;
      onChanged();
      return this;
    }

    private boolean useEnhanced_;
    /**
     *
     *
     * <pre>
     * Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if an enhanced model exists for
     * the audio.
     *
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * </pre>
     *
     * <code>bool use_enhanced = 14;</code>
     *
     * @return The useEnhanced.
     */
    @java.lang.Override
    public boolean getUseEnhanced() {
      return useEnhanced_;
    }
    /**
     *
     *
     * <pre>
     * Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if an enhanced model exists for
     * the audio.
     *
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * </pre>
     *
     * <code>bool use_enhanced = 14;</code>
     *
     * @param value The useEnhanced to set.
     * @return This builder for chaining.
     */
    public Builder setUseEnhanced(boolean value) {
      useEnhanced_ = value;
      bitField0_ |= 0x00080000;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if an enhanced model exists for
     * the audio.
     *
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * </pre>
     *
     * <code>bool use_enhanced = 14;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearUseEnhanced() {
      bitField0_ = (bitField0_ & ~0x00080000);
      useEnhanced_ = false;
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1.RecognitionConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognitionConfig)
  private static final com.google.cloud.speech.v1.RecognitionConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.speech.v1.RecognitionConfig();
  }

  public static com.google.cloud.speech.v1.RecognitionConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<RecognitionConfig> PARSER =
      new com.google.protobuf.AbstractParser<RecognitionConfig>() {
        @java.lang.Override
        public RecognitionConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<RecognitionConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<RecognitionConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
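
Below is a minimal usage sketch, not part of the generated file, showing how the model and use_enhanced fields documented above are set through the generated builder. The example class name, encoding, sample rate, and language code are illustrative assumptions; the builder calls themselves are the generated API.

import com.google.cloud.speech.v1.RecognitionConfig;

// Hypothetical example class; only the RecognitionConfig calls are generated API.
public class RecognitionConfigModelExample {
  public static void main(String[] args) {
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            // Lossless encoding, as the encoding documentation recommends.
            .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
            // Phone-call audio is typically recorded at 8 kHz.
            .setSampleRateHertz(8000)
            .setLanguageCode("en-US") // illustrative locale
            .setModel("phone_call") // one of the model names from the table above
            // Falls back to the standard model if no enhanced variant exists.
            .setUseEnhanced(true)
            .build();
    System.out.println(config.getModel() + " enhanced=" + config.getUseEnhanced());
  }
}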
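
The static PARSER and default-instance plumbing at the end of the class is what deserialization delegates to. A small round-trip sketch under the same assumptions (the example class name is hypothetical; toByteArray and parseFrom are standard generated-message methods):

import com.google.cloud.speech.v1.RecognitionConfig;
import com.google.protobuf.InvalidProtocolBufferException;

// Hypothetical example class demonstrating a serialize/parse round trip.
public class RecognitionConfigRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    RecognitionConfig config =
        RecognitionConfig.newBuilder().setModel("latest_short").setUseEnhanced(true).build();
    byte[] bytes = config.toByteArray(); // wire-format serialization
    RecognitionConfig parsed = RecognitionConfig.parseFrom(bytes); // routed through PARSER
    System.out.println(parsed.equals(config)); // true: messages compare by value
  }
}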