com.google.cloud.speech.v1p1beta1.RecognitionConfig

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v1p1beta1/cloud_speech.proto

package com.google.cloud.speech.v1p1beta1;

/**
 *
 *
 * 
 * Provides information to the recognizer that specifies how to process the
 * request.
 * 
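 * <p>A minimal usage sketch (the field values below are illustrative, not recommendations):
 *
 * <pre>{@code
 * RecognitionConfig config =
 *     RecognitionConfig.newBuilder()
 *         .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
 *         .setSampleRateHertz(16000)
 *         .setLanguageCode("en-US")
 *         .build();
 * }</pre>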
* * Protobuf type {@code google.cloud.speech.v1p1beta1.RecognitionConfig} */ public final class RecognitionConfig extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.speech.v1p1beta1.RecognitionConfig) RecognitionConfigOrBuilder { private static final long serialVersionUID = 0L; // Use RecognitionConfig.newBuilder() to construct. private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private RecognitionConfig() { encoding_ = 0; sampleRateHertz_ = 0; audioChannelCount_ = 0; enableSeparateRecognitionPerChannel_ = false; languageCode_ = ""; alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; maxAlternatives_ = 0; profanityFilter_ = false; speechContexts_ = java.util.Collections.emptyList(); enableWordTimeOffsets_ = false; enableWordConfidence_ = false; enableAutomaticPunctuation_ = false; enableSpeakerDiarization_ = false; diarizationSpeakerCount_ = 0; model_ = ""; useEnhanced_ = false; } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private RecognitionConfig( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { int rawValue = input.readEnum(); encoding_ = rawValue; break; } case 16: { sampleRateHertz_ = input.readInt32(); break; } case 26: { java.lang.String s = input.readStringRequireUtf8(); languageCode_ = s; break; } case 32: { maxAlternatives_ = input.readInt32(); break; } case 40: { profanityFilter_ = input.readBool(); break; } case 50: { if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) { speechContexts_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000100; } speechContexts_.add( input.readMessage( com.google.cloud.speech.v1p1beta1.SpeechContext.parser(), extensionRegistry)); break; } case 56: { audioChannelCount_ = input.readInt32(); break; } case 64: { enableWordTimeOffsets_ = input.readBool(); break; } case 74: { com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder subBuilder = null; if (metadata_ != null) { subBuilder = metadata_.toBuilder(); } metadata_ = input.readMessage( com.google.cloud.speech.v1p1beta1.RecognitionMetadata.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom(metadata_); metadata_ = subBuilder.buildPartial(); } break; } case 88: { enableAutomaticPunctuation_ = input.readBool(); break; } case 96: { enableSeparateRecognitionPerChannel_ = input.readBool(); break; } case 106: { java.lang.String s = input.readStringRequireUtf8(); model_ = s; break; } case 112: { useEnhanced_ = input.readBool(); break; } case 120: { enableWordConfidence_ = input.readBool(); break; } case 128: { enableSpeakerDiarization_ = input.readBool(); break; } case 136: { diarizationSpeakerCount_ = input.readInt32(); break; } case 146: { java.lang.String s = input.readStringRequireUtf8(); if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { alternativeLanguageCodes_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000020; } alternativeLanguageCodes_.add(s); 
break; } default: { if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) { speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_); } if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { alternativeLanguageCodes_ = alternativeLanguageCodes_.getUnmodifiableView(); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.speech.v1p1beta1.SpeechProto .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.speech.v1p1beta1.SpeechProto .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.speech.v1p1beta1.RecognitionConfig.class, com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder.class); } /** * * *
   * The encoding of the audio data sent in the request.
   * All encodings support only 1 channel (mono) audio.
   * For best results, the audio source should be captured and transmitted using
   * a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
   * recognition can be reduced if lossy codecs are used to capture or transmit
   * audio, particularly if background noise is present. Lossy codecs include
   * `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
   * The `FLAC` and `WAV` audio file formats include a header that describes the
   * included audio content. You can request recognition for `WAV` files that
   * contain either `LINEAR16` or `MULAW` encoded audio.
   * If you send `FLAC` or `WAV` audio file format in
   * your request, you do not need to specify an `AudioEncoding`; the audio
   * encoding format is determined from the file header. If you specify
   * an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
   * encoding configuration must match the encoding described in the audio
   * header; otherwise the request returns an
   * [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code.
   * 
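   * <p>A small sketch of the generated enum helpers (the chosen constant is illustrative):
   *
   * <pre>{@code
   * RecognitionConfig.AudioEncoding enc = RecognitionConfig.AudioEncoding.FLAC;
   * int wireValue = enc.getNumber();                          // 2
   * RecognitionConfig.AudioEncoding roundTrip =
   *     RecognitionConfig.AudioEncoding.forNumber(wireValue); // FLAC
   * }</pre>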
   *
   * Protobuf enum {@code google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding}
   */
  public enum AudioEncoding implements com.google.protobuf.ProtocolMessageEnum {
    /**
     *
     *
     *
     * Not specified.
     * 
     *
     * ENCODING_UNSPECIFIED = 0;
     */
    ENCODING_UNSPECIFIED(0),
    /**
     *
     *
     *
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * 
     *
     * LINEAR16 = 1;
     */
    LINEAR16(1),
    /**
     *
     *
     *
     * `FLAC` (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples, however, not all fields in
     * `STREAMINFO` are supported.
     * 
     *
     * FLAC = 2;
     */
    FLAC(2),
    /**
     *
     *
     *
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * 
     *
     * MULAW = 3;
     */
    MULAW(3),
    /**
     *
     *
     *
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * 
     *
     * AMR = 4;
     */
    AMR(4),
    /**
     *
     *
     *
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * 
     *
     * AMR_WB = 5;
     */
    AMR_WB(5),
    /**
     *
     *
     *
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
     * 
     *
     * OGG_OPUS = 6;
     */
    OGG_OPUS(6),
    /**
     *
     *
     *
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/) encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * 
     *
     * SPEEX_WITH_HEADER_BYTE = 7;
     */
    SPEEX_WITH_HEADER_BYTE(7),
    UNRECOGNIZED(-1),
    ;

    /**
     *
     *
     *
     * Not specified.
     * 
     *
     * ENCODING_UNSPECIFIED = 0;
     */
    public static final int ENCODING_UNSPECIFIED_VALUE = 0;
    /**
     *
     *
     *
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * 
     *
     * LINEAR16 = 1;
     */
    public static final int LINEAR16_VALUE = 1;
    /**
     *
     *
     *
     * `FLAC` (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples, however, not all fields in
     * `STREAMINFO` are supported.
     * 
     *
     * FLAC = 2;
     */
    public static final int FLAC_VALUE = 2;
    /**
     *
     *
     *
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * 
     *
     * MULAW = 3;
     */
    public static final int MULAW_VALUE = 3;
    /**
     *
     *
     *
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * 
     *
     * AMR = 4;
     */
    public static final int AMR_VALUE = 4;
    /**
     *
     *
     *
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * 
     *
     * AMR_WB = 5;
     */
    public static final int AMR_WB_VALUE = 5;
    /**
     *
     *
     *
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
     * 
     *
     * OGG_OPUS = 6;
     */
    public static final int OGG_OPUS_VALUE = 6;
    /**
     *
     *
     *
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/) encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * 
     *
     * SPEEX_WITH_HEADER_BYTE = 7;
     */
    public static final int SPEEX_WITH_HEADER_BYTE_VALUE = 7;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /** @deprecated Use {@link #forNumber(int)} instead. */
    @java.lang.Deprecated
    public static AudioEncoding valueOf(int value) {
      return forNumber(value);
    }

    public static AudioEncoding forNumber(int value) {
      switch (value) {
        case 0:
          return ENCODING_UNSPECIFIED;
        case 1:
          return LINEAR16;
        case 2:
          return FLAC;
        case 3:
          return MULAW;
        case 4:
          return AMR;
        case 5:
          return AMR_WB;
        case 6:
          return OGG_OPUS;
        case 7:
          return SPEEX_WITH_HEADER_BYTE;
        default:
          return null;
      }
    }

    public static com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalGetValueMap() {
      return internalValueMap;
    }

    private static final com.google.protobuf.Internal.EnumLiteMap<AudioEncoding> internalValueMap =
        new com.google.protobuf.Internal.EnumLiteMap<AudioEncoding>() {
          public AudioEncoding findValueByNumber(int number) {
            return AudioEncoding.forNumber(number);
          }
        };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }

    public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
      return getDescriptor();
    }

    public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
      return com.google.cloud.speech.v1p1beta1.RecognitionConfig.getDescriptor()
          .getEnumTypes()
          .get(0);
    }

    private static final AudioEncoding[] VALUES = values();

    public static AudioEncoding valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private AudioEncoding(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding)
  }

  private int bitField0_;

  public static final int ENCODING_FIELD_NUMBER = 1;
  private int encoding_;
  /**
   *
   *
   *
   * Encoding of audio data sent in all `RecognitionAudio` messages.
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   * 
   *
   * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;
   */
  public int getEncodingValue() {
    return encoding_;
  }
  /**
   *
   *
   *
   * Encoding of audio data sent in all `RecognitionAudio` messages.
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   * 
   *
   * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;
   */
  public com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding getEncoding() {
    @SuppressWarnings("deprecation")
    com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding result =
        com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.valueOf(encoding_);
    return result == null
        ? com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.UNRECOGNIZED
        : result;
  }

  public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2;
  private int sampleRateHertz_;
  /**
   *
   *
   *
   * Sample rate in Hertz of the audio data sent in all
   * `RecognitionAudio` messages. Valid values are: 8000-48000.
   * 16000 is optimal. For best results, set the sampling rate of the audio
   * source to 16000 Hz. If that's not possible, use the native sample rate of
   * the audio source (instead of re-sampling).
   * This field is optional for `FLAC` and `WAV` audio files and required
   * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
   * 
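   * <p>For example, for audio captured at 16 kHz (a sketch; match your actual source rate):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder().setSampleRateHertz(16000);
   * }</pre>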
   *
   * int32 sample_rate_hertz = 2;
   */
  public int getSampleRateHertz() {
    return sampleRateHertz_;
  }

  public static final int AUDIO_CHANNEL_COUNT_FIELD_NUMBER = 7;
  private int audioChannelCount_;
  /**
   *
   *
   *
   * *Optional* The number of channels in the input audio data.
   * ONLY set this for MULTI-CHANNEL recognition.
   * Valid values for LINEAR16 and FLAC are `1`-`8`.
   * Valid values for OGG_OPUS are `1`-`254`.
   * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
   * If `0` or omitted, defaults to one channel (mono).
   * Note: We only recognize the first channel by default.
   * To perform independent recognition on each channel set
   * `enable_separate_recognition_per_channel` to 'true'.
   * 
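   * <p>A sketch for stereo audio where each channel should be recognized separately
   * (see `enable_separate_recognition_per_channel` below):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder()
   *         .setAudioChannelCount(2)
   *         .setEnableSeparateRecognitionPerChannel(true);
   * }</pre>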
   *
   * int32 audio_channel_count = 7;
   */
  public int getAudioChannelCount() {
    return audioChannelCount_;
  }

  public static final int ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER = 12;
  private boolean enableSeparateRecognitionPerChannel_;
  /**
   *
   *
   *
   * This needs to be set to 'true' explicitly and `audio_channel_count` > 1
   * to get each channel recognized separately. The recognition result will
   * contain a `channel_tag` field to state which channel that result belongs
   * to. If this is not true, we will only recognize the first channel. The
   * request is billed cumulatively for all channels recognized:
   * `audio_channel_count` multiplied by the length of the audio.
   * 
   *
   * bool enable_separate_recognition_per_channel = 12;
   */
  public boolean getEnableSeparateRecognitionPerChannel() {
    return enableSeparateRecognitionPerChannel_;
  }

  public static final int LANGUAGE_CODE_FIELD_NUMBER = 3;
  private volatile java.lang.Object languageCode_;
  /**
   *
   *
   *
   * *Required* The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language Support](/speech-to-text/docs/languages)
   * for a list of the currently supported language codes.
   * 
   *
   * string language_code = 3;
   */
  public java.lang.String getLanguageCode() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      languageCode_ = s;
      return s;
    }
  }
  /**
   *
   *
   *
   * *Required* The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language Support](/speech-to-text/docs/languages)
   * for a list of the currently supported language codes.
   * 
   *
   * string language_code = 3;
   */
  public com.google.protobuf.ByteString getLanguageCodeBytes() {
    java.lang.Object ref = languageCode_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      languageCode_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER = 18;
  private com.google.protobuf.LazyStringList alternativeLanguageCodes_;
  /**
   *
   *
   *
   * *Optional* A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language Support](/speech-to-text/docs/languages)
   * for a list of the currently supported language codes.
   * If alternative languages are listed, the recognition result will contain
   * recognition in the most likely language detected, including the main
   * language_code. The recognition result will include the language tag
   * of the language detected in the audio.
   * Note: This feature is only supported for Voice Command and Voice Search
   * use cases and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
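   * <p>A sketch using the generated adder for this repeated field (the language codes
   * are illustrative):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder()
   *         .setLanguageCode("en-US")
   *         .addAlternativeLanguageCodes("es-US")
   *         .addAlternativeLanguageCodes("fr-CA");
   * }</pre>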
   *
   * repeated string alternative_language_codes = 18;
   */
  public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() {
    return alternativeLanguageCodes_;
  }
  /**
   *
   *
   *
   * *Optional* A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language Support](/speech-to-text/docs/languages)
   * for a list of the currently supported language codes.
   * If alternative languages are listed, the recognition result will contain
   * recognition in the most likely language detected, including the main
   * language_code. The recognition result will include the language tag
   * of the language detected in the audio.
   * Note: This feature is only supported for Voice Command and Voice Search
   * use cases and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
   *
   * repeated string alternative_language_codes = 18;
   */
  public int getAlternativeLanguageCodesCount() {
    return alternativeLanguageCodes_.size();
  }
  /**
   *
   *
   *
   * *Optional* A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language Support](/speech-to-text/docs/languages)
   * for a list of the currently supported language codes.
   * If alternative languages are listed, the recognition result will contain
   * recognition in the most likely language detected, including the main
   * language_code. The recognition result will include the language tag
   * of the language detected in the audio.
   * Note: This feature is only supported for Voice Command and Voice Search
   * use cases and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
   *
   * repeated string alternative_language_codes = 18;
   */
  public java.lang.String getAlternativeLanguageCodes(int index) {
    return alternativeLanguageCodes_.get(index);
  }
  /**
   *
   *
   *
   * *Optional* A list of up to 3 additional
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
   * listing possible alternative languages of the supplied audio.
   * See [Language Support](/speech-to-text/docs/languages)
   * for a list of the currently supported language codes.
   * If alternative languages are listed, the recognition result will contain
   * recognition in the most likely language detected, including the main
   * language_code. The recognition result will include the language tag
   * of the language detected in the audio.
   * Note: This feature is only supported for Voice Command and Voice Search
   * use cases and performance may vary for other use cases (e.g., phone call
   * transcription).
   * 
   *
   * repeated string alternative_language_codes = 18;
   */
  public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) {
    return alternativeLanguageCodes_.getByteString(index);
  }

  public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 4;
  private int maxAlternatives_;
  /**
   *
   *
   *
   * *Optional* Maximum number of recognition hypotheses to be returned.
   * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   * within each `SpeechRecognitionResult`.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, a maximum of one is returned.
   * 
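   * <p>For example, to request up to three hypotheses per result (a sketch; the server
   * may still return fewer):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder().setMaxAlternatives(3);
   * }</pre>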
   *
   * int32 max_alternatives = 4;
   */
  public int getMaxAlternatives() {
    return maxAlternatives_;
  }

  public static final int PROFANITY_FILTER_FIELD_NUMBER = 5;
  private boolean profanityFilter_;
  /**
   *
   *
   *
   * *Optional* If set to `true`, the server will attempt to filter out
   * profanities, replacing all but the initial character in each filtered word
   * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * 
   *
   * bool profanity_filter = 5;
   */
  public boolean getProfanityFilter() {
    return profanityFilter_;
  }

  public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 6;
  private java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext> speechContexts_;
  /**
   *
   *
   *
   * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
   * 
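   * <p>A sketch supplying a phrase hint via the generated adder (the phrase is
   * illustrative):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder()
   *         .addSpeechContexts(
   *             com.google.cloud.speech.v1p1beta1.SpeechContext.newBuilder()
   *                 .addPhrases("weather forecast")
   *                 .build());
   * }</pre>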
   *
   * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;
   */
  public java.util.List<com.google.cloud.speech.v1p1beta1.SpeechContext> getSpeechContextsList() {
    return speechContexts_;
  }
  /**
   *
   *
   *
   * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
   * 
   *
   * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;
   */
  public java.util.List<? extends com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>
      getSpeechContextsOrBuilderList() {
    return speechContexts_;
  }
  /**
   *
   *
   *
   * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
   * 
   *
   * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;
   */
  public int getSpeechContextsCount() {
    return speechContexts_.size();
  }
  /**
   *
   *
   *
   * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
   * 
   *
   * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;
   */
  public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int index) {
    return speechContexts_.get(index);
  }
  /**
   *
   *
   *
   * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
   * A means to provide context to assist the speech recognition. For more
   * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
   * 
   *
   * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6;
   */
  public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
      int index) {
    return speechContexts_.get(index);
  }

  public static final int ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER = 8;
  private boolean enableWordTimeOffsets_;
  /**
   *
   *
   *
   * *Optional* If `true`, the top result includes a list of words and
   * the start and end time offsets (timestamps) for those words. If
   * `false`, no word-level time offset information is returned. The default is
   * `false`.
   * 
   *
   * bool enable_word_time_offsets = 8;
   */
  public boolean getEnableWordTimeOffsets() {
    return enableWordTimeOffsets_;
  }

  public static final int ENABLE_WORD_CONFIDENCE_FIELD_NUMBER = 15;
  private boolean enableWordConfidence_;
  /**
   *
   *
   *
   * *Optional* If `true`, the top result includes a list of words and the
   * confidence for those words. If `false`, no word-level confidence
   * information is returned. The default is `false`.
   * 
   *
   * bool enable_word_confidence = 15;
   */
  public boolean getEnableWordConfidence() {
    return enableWordConfidence_;
  }

  public static final int ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER = 11;
  private boolean enableAutomaticPunctuation_;
  /**
   *
   *
   *
   * *Optional* If 'true', adds punctuation to recognition result hypotheses.
   * This feature is only available in select languages. Setting this for
   * requests in other languages has no effect at all.
   * The default 'false' value does not add punctuation to result hypotheses.
   * Note: This is currently offered as an experimental service, complimentary
   * to all users. In the future this may be exclusively available as a
   * premium feature.
   * 
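   * <p>A sketch enabling word-level detail together with punctuation (assuming the
   * request language supports automatic punctuation):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder()
   *         .setEnableWordTimeOffsets(true)
   *         .setEnableWordConfidence(true)
   *         .setEnableAutomaticPunctuation(true);
   * }</pre>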
   *
   * bool enable_automatic_punctuation = 11;
   */
  public boolean getEnableAutomaticPunctuation() {
    return enableAutomaticPunctuation_;
  }

  public static final int ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER = 16;
  private boolean enableSpeakerDiarization_;
  /**
   *
   *
   *
   * *Optional* If 'true', enables speaker detection for each recognized word in
   * the top alternative of the recognition result using a speaker_tag provided
   * in the WordInfo.
   * Note: When this is true, we send all the words from the beginning of the
   * audio for the top alternative in every consecutive STREAMING response.
   * This is done in order to improve our speaker tags as our models learn to
   * identify the speakers in the conversation over time.
   * For non-streaming requests, the diarization results will be provided only
   * in the top alternative of the FINAL SpeechRecognitionResult.
   * 
   *
   * bool enable_speaker_diarization = 16;
   */
  public boolean getEnableSpeakerDiarization() {
    return enableSpeakerDiarization_;
  }

  public static final int DIARIZATION_SPEAKER_COUNT_FIELD_NUMBER = 17;
  private int diarizationSpeakerCount_;
  /**
   *
   *
   *
   * *Optional*
   * If set, specifies the estimated number of speakers in the conversation.
   * If not set, defaults to '2'.
   * Ignored unless enable_speaker_diarization is set to true.
   * 
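   * <p>A sketch enabling diarization for an estimated two-speaker conversation:
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder()
   *         .setEnableSpeakerDiarization(true)
   *         .setDiarizationSpeakerCount(2); // ignored unless diarization is enabled
   * }</pre>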
   *
   * int32 diarization_speaker_count = 17;
   */
  public int getDiarizationSpeakerCount() {
    return diarizationSpeakerCount_;
  }

  public static final int METADATA_FIELD_NUMBER = 9;
  private com.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata_;
  /**
   *
   *
   *
   * *Optional* Metadata regarding this request.
   * 
   *
   * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;
   */
  public boolean hasMetadata() {
    return metadata_ != null;
  }
  /**
   *
   *
   *
   * *Optional* Metadata regarding this request.
   * 
   *
   * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;
   */
  public com.google.cloud.speech.v1p1beta1.RecognitionMetadata getMetadata() {
    return metadata_ == null
        ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance()
        : metadata_;
  }
  /**
   *
   *
   *
   * *Optional* Metadata regarding this request.
   * 
   *
   * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9;
   */
  public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadataOrBuilder() {
    return getMetadata();
  }

  public static final int MODEL_FIELD_NUMBER = 13;
  private volatile java.lang.Object model_;
  /**
   *
   *
   *
   * *Optional* Which model to select for the given request. Select the model
   * best suited to your domain to get the best results. If a model is not
   * explicitly specified, then we auto-select a model based on the parameters
   * in the RecognitionConfig.
   * <table>
   *   <tr>
   *     <td><b>Model</b></td>
   *     <td><b>Description</b></td>
   *   </tr>
   *   <tr>
   *     <td><code>command_and_search</code></td>
   *     <td>Best for short queries such as voice commands or voice search.</td>
   *   </tr>
   *   <tr>
   *     <td><code>phone_call</code></td>
   *     <td>Best for audio that originated from a phone call (typically
   *     recorded at an 8khz sampling rate).</td>
   *   </tr>
   *   <tr>
   *     <td><code>video</code></td>
   *     <td>Best for audio that originated from video or includes multiple
   *         speakers. Ideally the audio is recorded at a 16khz or greater
   *         sampling rate. This is a premium model that costs more than the
   *         standard rate.</td>
   *   </tr>
   *   <tr>
   *     <td><code>default</code></td>
   *     <td>Best for audio that is not one of the specific audio models.
   *         For example, long-form audio. Ideally the audio is high-fidelity,
   *         recorded at a 16khz or greater sampling rate.</td>
   *   </tr>
   * </table>
   * 
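   * <p>For example, to select the phone-call model explicitly (a sketch):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder().setModel("phone_call");
   * }</pre>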
   *
   * string model = 13;
   */
  public java.lang.String getModel() {
    java.lang.Object ref = model_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      model_ = s;
      return s;
    }
  }
  /**
   *
   *
   *
   * *Optional* Which model to select for the given request. Select the model
   * best suited to your domain to get the best results. If a model is not
   * explicitly specified, then we auto-select a model based on the parameters
   * in the RecognitionConfig.
   * <table>
   *   <tr>
   *     <td><b>Model</b></td>
   *     <td><b>Description</b></td>
   *   </tr>
   *   <tr>
   *     <td><code>command_and_search</code></td>
   *     <td>Best for short queries such as voice commands or voice search.</td>
   *   </tr>
   *   <tr>
   *     <td><code>phone_call</code></td>
   *     <td>Best for audio that originated from a phone call (typically
   *     recorded at an 8khz sampling rate).</td>
   *   </tr>
   *   <tr>
   *     <td><code>video</code></td>
   *     <td>Best for audio that originated from video or includes multiple
   *         speakers. Ideally the audio is recorded at a 16khz or greater
   *         sampling rate. This is a premium model that costs more than the
   *         standard rate.</td>
   *   </tr>
   *   <tr>
   *     <td><code>default</code></td>
   *     <td>Best for audio that is not one of the specific audio models.
   *         For example, long-form audio. Ideally the audio is high-fidelity,
   *         recorded at a 16khz or greater sampling rate.</td>
   *   </tr>
   * </table>
   * 
   *
   * string model = 13;
   */
  public com.google.protobuf.ByteString getModelBytes() {
    java.lang.Object ref = model_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      model_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  public static final int USE_ENHANCED_FIELD_NUMBER = 14;
  private boolean useEnhanced_;
  /**
   *
   *
   *
   * *Optional* Set to true to use an enhanced model for speech recognition.
   * If `use_enhanced` is set to true and the `model` field is not set, then
   * an appropriate enhanced model is chosen if:
   * 1. project is eligible for requesting enhanced models
   * 2. an enhanced model exists for the audio
   * If `use_enhanced` is true and an enhanced version of the specified model
   * does not exist, then the speech is recognized using the standard version
   * of the specified model.
   * Enhanced speech models require that you opt-in to data logging using
   * instructions in the
   * [documentation](/speech-to-text/docs/enable-data-logging). If you set
   * `use_enhanced` to true and you have not enabled audio logging, then you
   * will receive an error.
   * 
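   * <p>A sketch opting into an enhanced model (assumes the project has enabled data
   * logging as described above):
   *
   * <pre>{@code
   * RecognitionConfig.Builder builder =
   *     RecognitionConfig.newBuilder()
   *         .setUseEnhanced(true)
   *         .setModel("phone_call"); // falls back to the standard model if no enhanced one exists
   * }</pre>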
* * bool use_enhanced = 14; */ public boolean getUseEnhanced() { return useEnhanced_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (encoding_ != com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED .getNumber()) { output.writeEnum(1, encoding_); } if (sampleRateHertz_ != 0) { output.writeInt32(2, sampleRateHertz_); } if (!getLanguageCodeBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_); } if (maxAlternatives_ != 0) { output.writeInt32(4, maxAlternatives_); } if (profanityFilter_ != false) { output.writeBool(5, profanityFilter_); } for (int i = 0; i < speechContexts_.size(); i++) { output.writeMessage(6, speechContexts_.get(i)); } if (audioChannelCount_ != 0) { output.writeInt32(7, audioChannelCount_); } if (enableWordTimeOffsets_ != false) { output.writeBool(8, enableWordTimeOffsets_); } if (metadata_ != null) { output.writeMessage(9, getMetadata()); } if (enableAutomaticPunctuation_ != false) { output.writeBool(11, enableAutomaticPunctuation_); } if (enableSeparateRecognitionPerChannel_ != false) { output.writeBool(12, enableSeparateRecognitionPerChannel_); } if (!getModelBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 13, model_); } if (useEnhanced_ != false) { output.writeBool(14, useEnhanced_); } if (enableWordConfidence_ != false) { output.writeBool(15, enableWordConfidence_); } if (enableSpeakerDiarization_ != false) { output.writeBool(16, enableSpeakerDiarization_); } if (diarizationSpeakerCount_ != 0) { output.writeInt32(17, diarizationSpeakerCount_); } for (int i = 0; i < alternativeLanguageCodes_.size(); i++) { com.google.protobuf.GeneratedMessageV3.writeString( output, 18, alternativeLanguageCodes_.getRaw(i)); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (encoding_ != com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, encoding_); } if (sampleRateHertz_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_); } if (!getLanguageCodeBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_); } if (maxAlternatives_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, maxAlternatives_); } if (profanityFilter_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, profanityFilter_); } for (int i = 0; i < speechContexts_.size(); i++) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, speechContexts_.get(i)); } if (audioChannelCount_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(7, audioChannelCount_); } if (enableWordTimeOffsets_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, enableWordTimeOffsets_); } if (metadata_ != null) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(9, getMetadata()); } if (enableAutomaticPunctuation_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, 
enableAutomaticPunctuation_); } if (enableSeparateRecognitionPerChannel_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize( 12, enableSeparateRecognitionPerChannel_); } if (!getModelBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, model_); } if (useEnhanced_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(14, useEnhanced_); } if (enableWordConfidence_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(15, enableWordConfidence_); } if (enableSpeakerDiarization_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(16, enableSpeakerDiarization_); } if (diarizationSpeakerCount_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(17, diarizationSpeakerCount_); } { int dataSize = 0; for (int i = 0; i < alternativeLanguageCodes_.size(); i++) { dataSize += computeStringSizeNoTag(alternativeLanguageCodes_.getRaw(i)); } size += dataSize; size += 2 * getAlternativeLanguageCodesList().size(); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.speech.v1p1beta1.RecognitionConfig)) { return super.equals(obj); } com.google.cloud.speech.v1p1beta1.RecognitionConfig other = (com.google.cloud.speech.v1p1beta1.RecognitionConfig) obj; boolean result = true; result = result && encoding_ == other.encoding_; result = result && (getSampleRateHertz() == other.getSampleRateHertz()); result = result && (getAudioChannelCount() == other.getAudioChannelCount()); result = result && (getEnableSeparateRecognitionPerChannel() == other.getEnableSeparateRecognitionPerChannel()); result = result && getLanguageCode().equals(other.getLanguageCode()); result = result && getAlternativeLanguageCodesList().equals(other.getAlternativeLanguageCodesList()); result = result && (getMaxAlternatives() == other.getMaxAlternatives()); result = result && (getProfanityFilter() == other.getProfanityFilter()); result = result && getSpeechContextsList().equals(other.getSpeechContextsList()); result = result && (getEnableWordTimeOffsets() == other.getEnableWordTimeOffsets()); result = result && (getEnableWordConfidence() == other.getEnableWordConfidence()); result = result && (getEnableAutomaticPunctuation() == other.getEnableAutomaticPunctuation()); result = result && (getEnableSpeakerDiarization() == other.getEnableSpeakerDiarization()); result = result && (getDiarizationSpeakerCount() == other.getDiarizationSpeakerCount()); result = result && (hasMetadata() == other.hasMetadata()); if (hasMetadata()) { result = result && getMetadata().equals(other.getMetadata()); } result = result && getModel().equals(other.getModel()); result = result && (getUseEnhanced() == other.getUseEnhanced()); result = result && unknownFields.equals(other.unknownFields); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + ENCODING_FIELD_NUMBER; hash = (53 * hash) + encoding_; hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER; hash = (53 * hash) + getSampleRateHertz(); hash = (37 * hash) + AUDIO_CHANNEL_COUNT_FIELD_NUMBER; hash = (53 * hash) + getAudioChannelCount(); hash = (37 * hash) + ENABLE_SEPARATE_RECOGNITION_PER_CHANNEL_FIELD_NUMBER; hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean(getEnableSeparateRecognitionPerChannel()); hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER; hash = (53 * hash) + getLanguageCode().hashCode(); if (getAlternativeLanguageCodesCount() > 0) { hash = (37 * hash) + ALTERNATIVE_LANGUAGE_CODES_FIELD_NUMBER; hash = (53 * hash) + getAlternativeLanguageCodesList().hashCode(); } hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER; hash = (53 * hash) + getMaxAlternatives(); hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getProfanityFilter()); if (getSpeechContextsCount() > 0) { hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER; hash = (53 * hash) + getSpeechContextsList().hashCode(); } hash = (37 * hash) + ENABLE_WORD_TIME_OFFSETS_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordTimeOffsets()); hash = (37 * hash) + ENABLE_WORD_CONFIDENCE_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordConfidence()); hash = (37 * hash) + ENABLE_AUTOMATIC_PUNCTUATION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableAutomaticPunctuation()); hash = (37 * hash) + ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpeakerDiarization()); hash = (37 * hash) + DIARIZATION_SPEAKER_COUNT_FIELD_NUMBER; hash = (53 * hash) + getDiarizationSpeakerCount(); if (hasMetadata()) { hash = (37 * hash) + METADATA_FIELD_NUMBER; hash = (53 * hash) + getMetadata().hashCode(); } hash = (37 * hash) + MODEL_FIELD_NUMBER; hash = (53 * hash) + getModel().hashCode(); hash = (37 * hash) + USE_ENHANCED_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseEnhanced()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static 
com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.speech.v1p1beta1.RecognitionConfig parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.cloud.speech.v1p1beta1.RecognitionConfig prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * *
   * Provides information to the recognizer that specifies how to process the
   * request.
   * 
* * Protobuf type {@code google.cloud.speech.v1p1beta1.RecognitionConfig} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.speech.v1p1beta1.RecognitionConfig) com.google.cloud.speech.v1p1beta1.RecognitionConfigOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.speech.v1p1beta1.SpeechProto .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.speech.v1p1beta1.SpeechProto .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.speech.v1p1beta1.RecognitionConfig.class, com.google.cloud.speech.v1p1beta1.RecognitionConfig.Builder.class); } // Construct using com.google.cloud.speech.v1p1beta1.RecognitionConfig.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getSpeechContextsFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); encoding_ = 0; sampleRateHertz_ = 0; audioChannelCount_ = 0; enableSeparateRecognitionPerChannel_ = false; languageCode_ = ""; alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000020); maxAlternatives_ = 0; profanityFilter_ = false; if (speechContextsBuilder_ == null) { speechContexts_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000100); } else { speechContextsBuilder_.clear(); } enableWordTimeOffsets_ = false; enableWordConfidence_ = false; enableAutomaticPunctuation_ = false; enableSpeakerDiarization_ = false; diarizationSpeakerCount_ = 0; if (metadataBuilder_ == null) { metadata_ = null; } else { metadata_ = null; metadataBuilder_ = null; } model_ = ""; useEnhanced_ = false; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.speech.v1p1beta1.SpeechProto .internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor; } @java.lang.Override public com.google.cloud.speech.v1p1beta1.RecognitionConfig getDefaultInstanceForType() { return com.google.cloud.speech.v1p1beta1.RecognitionConfig.getDefaultInstance(); } @java.lang.Override public com.google.cloud.speech.v1p1beta1.RecognitionConfig build() { com.google.cloud.speech.v1p1beta1.RecognitionConfig result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.speech.v1p1beta1.RecognitionConfig buildPartial() { com.google.cloud.speech.v1p1beta1.RecognitionConfig result = new com.google.cloud.speech.v1p1beta1.RecognitionConfig(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; result.encoding_ = encoding_; result.sampleRateHertz_ = sampleRateHertz_; result.audioChannelCount_ = audioChannelCount_; result.enableSeparateRecognitionPerChannel_ = enableSeparateRecognitionPerChannel_; result.languageCode_ = languageCode_; if (((bitField0_ & 0x00000020) == 0x00000020)) { alternativeLanguageCodes_ = 
alternativeLanguageCodes_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000020); } result.alternativeLanguageCodes_ = alternativeLanguageCodes_; result.maxAlternatives_ = maxAlternatives_; result.profanityFilter_ = profanityFilter_; if (speechContextsBuilder_ == null) { if (((bitField0_ & 0x00000100) == 0x00000100)) { speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_); bitField0_ = (bitField0_ & ~0x00000100); } result.speechContexts_ = speechContexts_; } else { result.speechContexts_ = speechContextsBuilder_.build(); } result.enableWordTimeOffsets_ = enableWordTimeOffsets_; result.enableWordConfidence_ = enableWordConfidence_; result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_; result.enableSpeakerDiarization_ = enableSpeakerDiarization_; result.diarizationSpeakerCount_ = diarizationSpeakerCount_; if (metadataBuilder_ == null) { result.metadata_ = metadata_; } else { result.metadata_ = metadataBuilder_.build(); } result.model_ = model_; result.useEnhanced_ = useEnhanced_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return (Builder) super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return (Builder) super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return (Builder) super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return (Builder) super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.speech.v1p1beta1.RecognitionConfig) { return mergeFrom((com.google.cloud.speech.v1p1beta1.RecognitionConfig) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.speech.v1p1beta1.RecognitionConfig other) { if (other == com.google.cloud.speech.v1p1beta1.RecognitionConfig.getDefaultInstance()) return this; if (other.encoding_ != 0) { setEncodingValue(other.getEncodingValue()); } if (other.getSampleRateHertz() != 0) { setSampleRateHertz(other.getSampleRateHertz()); } if (other.getAudioChannelCount() != 0) { setAudioChannelCount(other.getAudioChannelCount()); } if (other.getEnableSeparateRecognitionPerChannel() != false) { setEnableSeparateRecognitionPerChannel(other.getEnableSeparateRecognitionPerChannel()); } if (!other.getLanguageCode().isEmpty()) { languageCode_ = other.languageCode_; onChanged(); } if (!other.alternativeLanguageCodes_.isEmpty()) { if (alternativeLanguageCodes_.isEmpty()) { alternativeLanguageCodes_ = other.alternativeLanguageCodes_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureAlternativeLanguageCodesIsMutable(); alternativeLanguageCodes_.addAll(other.alternativeLanguageCodes_); } onChanged(); } if (other.getMaxAlternatives() != 0) { setMaxAlternatives(other.getMaxAlternatives()); } if (other.getProfanityFilter() != false) { setProfanityFilter(other.getProfanityFilter()); } if 
(speechContextsBuilder_ == null) { if (!other.speechContexts_.isEmpty()) { if (speechContexts_.isEmpty()) { speechContexts_ = other.speechContexts_; bitField0_ = (bitField0_ & ~0x00000100); } else { ensureSpeechContextsIsMutable(); speechContexts_.addAll(other.speechContexts_); } onChanged(); } } else { if (!other.speechContexts_.isEmpty()) { if (speechContextsBuilder_.isEmpty()) { speechContextsBuilder_.dispose(); speechContextsBuilder_ = null; speechContexts_ = other.speechContexts_; bitField0_ = (bitField0_ & ~0x00000100); speechContextsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getSpeechContextsFieldBuilder() : null; } else { speechContextsBuilder_.addAllMessages(other.speechContexts_); } } } if (other.getEnableWordTimeOffsets() != false) { setEnableWordTimeOffsets(other.getEnableWordTimeOffsets()); } if (other.getEnableWordConfidence() != false) { setEnableWordConfidence(other.getEnableWordConfidence()); } if (other.getEnableAutomaticPunctuation() != false) { setEnableAutomaticPunctuation(other.getEnableAutomaticPunctuation()); } if (other.getEnableSpeakerDiarization() != false) { setEnableSpeakerDiarization(other.getEnableSpeakerDiarization()); } if (other.getDiarizationSpeakerCount() != 0) { setDiarizationSpeakerCount(other.getDiarizationSpeakerCount()); } if (other.hasMetadata()) { mergeMetadata(other.getMetadata()); } if (!other.getModel().isEmpty()) { model_ = other.model_; onChanged(); } if (other.getUseEnhanced() != false) { setUseEnhanced(other.getUseEnhanced()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.speech.v1p1beta1.RecognitionConfig parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.speech.v1p1beta1.RecognitionConfig) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int encoding_ = 0; /** * * *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
     *
     * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;
     */
    public int getEncodingValue() {
      return encoding_;
    }
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
     *
     * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1;
     */
    public Builder setEncodingValue(int value) {
      encoding_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; */ public com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding getEncoding() { @SuppressWarnings("deprecation") com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding result = com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.valueOf(encoding_); return result == null ? com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding.UNRECOGNIZED : result; } /** * * *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; */ public Builder setEncoding( com.google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding value) { if (value == null) { throw new NullPointerException(); } encoding_ = value.getNumber(); onChanged(); return this; } /** * * *
     * Encoding of audio data sent in all `RecognitionAudio` messages.
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding encoding = 1; */ public Builder clearEncoding() { encoding_ = 0; onChanged(); return this; } private int sampleRateHertz_; /** * * *
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
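     * For example, a sketch of a telephony source natively captured at
     * 8000 Hz, which keeps its native rate rather than being resampled:
     * <pre>
     * RecognitionConfig.Builder builder =
     *     RecognitionConfig.newBuilder()
     *         .setEncoding(RecognitionConfig.AudioEncoding.MULAW)
     *         .setSampleRateHertz(8000); // native rate of the source
     * </pre>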
* * int32 sample_rate_hertz = 2; */ public int getSampleRateHertz() { return sampleRateHertz_; } /** * * *
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
* * int32 sample_rate_hertz = 2; */ public Builder setSampleRateHertz(int value) { sampleRateHertz_ = value; onChanged(); return this; } /** * * *
     * Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * This field is optional for `FLAC` and `WAV` audio files and required
     * for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
     * 
* * int32 sample_rate_hertz = 2; */ public Builder clearSampleRateHertz() { sampleRateHertz_ = 0; onChanged(); return this; } private int audioChannelCount_; /** * * *
     * *Optional* The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16 and FLAC are `1`-`8`.
     * Valid values for OGG_OPUS are `1`-`254`.
     * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to `true`.
     * 
* * int32 audio_channel_count = 7; */ public int getAudioChannelCount() { return audioChannelCount_; } /** * * *
     * *Optional* The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16 and FLAC are `1`-`8`.
     * Valid values for OGG_OPUS are `1`-`254`.
     * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to `true`.
     * 
* * int32 audio_channel_count = 7; */ public Builder setAudioChannelCount(int value) { audioChannelCount_ = value; onChanged(); return this; } /** * * *
     * *Optional* The number of channels in the input audio data.
     * ONLY set this for MULTI-CHANNEL recognition.
     * Valid values for LINEAR16 and FLAC are `1`-`8`.
     * Valid values for OGG_OPUS are `1`-`254`.
     * Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
     * If `0` or omitted, defaults to one channel (mono).
     * Note: We only recognize the first channel by default.
     * To perform independent recognition on each channel set
     * `enable_separate_recognition_per_channel` to `true`.
     * 
* * int32 audio_channel_count = 7; */ public Builder clearAudioChannelCount() { audioChannelCount_ = 0; onChanged(); return this; } private boolean enableSeparateRecognitionPerChannel_; /** * * *
     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * 
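     * A minimal sketch of a stereo request in which each channel is
     * transcribed independently:
     * <pre>
     * RecognitionConfig config =
     *     RecognitionConfig.newBuilder()
     *         .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
     *         .setSampleRateHertz(16000)
     *         .setLanguageCode("en-US")
     *         .setAudioChannelCount(2)
     *         .setEnableSeparateRecognitionPerChannel(true)
     *         .build();
     * </pre>
     * Each result then carries a `channel_tag`, and the request is billed
     * for both channels.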
* * bool enable_separate_recognition_per_channel = 12; */ public boolean getEnableSeparateRecognitionPerChannel() { return enableSeparateRecognitionPerChannel_; } /** * * *
     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * 
* * bool enable_separate_recognition_per_channel = 12; */ public Builder setEnableSeparateRecognitionPerChannel(boolean value) { enableSeparateRecognitionPerChannel_ = value; onChanged(); return this; } /** * * *
     * This needs to be set to `true` explicitly and `audio_channel_count` > 1
     * to get each channel recognized separately. The recognition result will
     * contain a `channel_tag` field to state which channel that result belongs
     * to. If this is not true, we will only recognize the first channel. The
     * request is billed cumulatively for all channels recognized:
     * `audio_channel_count` multiplied by the length of the audio.
     * 
* * bool enable_separate_recognition_per_channel = 12; */ public Builder clearEnableSeparateRecognitionPerChannel() { enableSeparateRecognitionPerChannel_ = false; onChanged(); return this; } private java.lang.Object languageCode_ = ""; /** * * *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * 
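     * For example, with a `RecognitionConfig.Builder` named `builder`,
     * Brazilian Portuguese audio would use:
     * <pre>
     * builder.setLanguageCode("pt-BR");
     * </pre>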
* * string language_code = 3; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); languageCode_ = s; return s; } else { return (java.lang.String) ref; } } /** * * *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); languageCode_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public Builder setLanguageCode(java.lang.String value) { if (value == null) { throw new NullPointerException(); } languageCode_ = value; onChanged(); return this; } /** * * *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public Builder clearLanguageCode() { languageCode_ = getDefaultInstance().getLanguageCode(); onChanged(); return this; } /** * * *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); languageCode_ = value; onChanged(); return this; } private com.google.protobuf.LazyStringList alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureAlternativeLanguageCodesIsMutable() { if (!((bitField0_ & 0x00000020) == 0x00000020)) { alternativeLanguageCodes_ = new com.google.protobuf.LazyStringArrayList(alternativeLanguageCodes_); bitField0_ |= 0x00000020; } } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
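     * A sketch of a voice-search request whose audio may be English, Spanish,
     * or Hindi (the specific tags are illustrative):
     * <pre>
     * RecognitionConfig config =
     *     RecognitionConfig.newBuilder()
     *         .setLanguageCode("en-US")
     *         .addAlternativeLanguageCodes("es-ES")
     *         .addAlternativeLanguageCodes("hi-IN")
     *         .build();
     * </pre>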
* * repeated string alternative_language_codes = 18; */ public com.google.protobuf.ProtocolStringList getAlternativeLanguageCodesList() { return alternativeLanguageCodes_.getUnmodifiableView(); } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public int getAlternativeLanguageCodesCount() { return alternativeLanguageCodes_.size(); } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public java.lang.String getAlternativeLanguageCodes(int index) { return alternativeLanguageCodes_.get(index); } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public com.google.protobuf.ByteString getAlternativeLanguageCodesBytes(int index) { return alternativeLanguageCodes_.getByteString(index); } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public Builder setAlternativeLanguageCodes(int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureAlternativeLanguageCodesIsMutable(); alternativeLanguageCodes_.set(index, value); onChanged(); return this; } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public Builder addAlternativeLanguageCodes(java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureAlternativeLanguageCodesIsMutable(); alternativeLanguageCodes_.add(value); onChanged(); return this; } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public Builder addAllAlternativeLanguageCodes(java.lang.Iterable values) { ensureAlternativeLanguageCodesIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, alternativeLanguageCodes_); onChanged(); return this; } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public Builder clearAlternativeLanguageCodes() { alternativeLanguageCodes_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000020); onChanged(); return this; } /** * * *
     * *Optional* A list of up to 3 additional
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
     * listing possible alternative languages of the supplied audio.
     * See [Language Support](/speech-to-text/docs/languages)
     * for a list of the currently supported language codes.
     * If alternative languages are listed, the recognition result will contain
     * the transcript in the most likely language detected, chosen from among
     * the alternatives and the main language_code. The recognition result will
     * include the language tag of the language detected in the audio.
     * Note: This feature is only supported for Voice Command and Voice Search
     * use cases and performance may vary for other use cases (e.g., phone call
     * transcription).
     * 
* * repeated string alternative_language_codes = 18; */ public Builder addAlternativeLanguageCodesBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); ensureAlternativeLanguageCodesIsMutable(); alternativeLanguageCodes_.add(value); onChanged(); return this; } private int maxAlternatives_; /** * * *
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server will return a maximum of one.
     * 
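     * For example, with a `RecognitionConfig.Builder` named `builder`, to
     * request up to three hypotheses per result:
     * <pre>
     * builder.setMaxAlternatives(3);
     * </pre>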
* * int32 max_alternatives = 4; */ public int getMaxAlternatives() { return maxAlternatives_; } /** * * *
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server will return a maximum of one.
     * 
* * int32 max_alternatives = 4; */ public Builder setMaxAlternatives(int value) { maxAlternatives_ = value; onChanged(); return this; } /** * * *
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, the server will return a maximum of one.
     * 
* * int32 max_alternatives = 4; */ public Builder clearMaxAlternatives() { maxAlternatives_ = 0; onChanged(); return this; } private boolean profanityFilter_; /** * * *
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
* * bool profanity_filter = 5; */ public boolean getProfanityFilter() { return profanityFilter_; } /** * * *
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
* * bool profanity_filter = 5; */ public Builder setProfanityFilter(boolean value) { profanityFilter_ = value; onChanged(); return this; } /** * * *
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
* * bool profanity_filter = 5; */ public Builder clearProfanityFilter() { profanityFilter_ = false; onChanged(); return this; } private java.util.List speechContexts_ = java.util.Collections.emptyList(); private void ensureSpeechContextsIsMutable() { if (!((bitField0_ & 0x00000100) == 0x00000100)) { speechContexts_ = new java.util.ArrayList( speechContexts_); bitField0_ |= 0x00000100; } } private com.google.protobuf.RepeatedFieldBuilderV3< com.google.cloud.speech.v1p1beta1.SpeechContext, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder, com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder> speechContextsBuilder_; /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
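     * A sketch that biases recognition toward domain phrases, given a
     * `RecognitionConfig.Builder` named `builder` (the phrases themselves are
     * illustrative):
     * <pre>
     * builder.addSpeechContexts(
     *     com.google.cloud.speech.v1p1beta1.SpeechContext.newBuilder()
     *         .addPhrases("weather forecast")
     *         .addPhrases("sales pipeline")
     *         .build());
     * </pre>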
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsList() { if (speechContextsBuilder_ == null) { return java.util.Collections.unmodifiableList(speechContexts_); } else { return speechContextsBuilder_.getMessageList(); } } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public int getSpeechContextsCount() { if (speechContextsBuilder_ == null) { return speechContexts_.size(); } else { return speechContextsBuilder_.getCount(); } } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1p1beta1.SpeechContext getSpeechContexts(int index) { if (speechContextsBuilder_ == null) { return speechContexts_.get(index); } else { return speechContextsBuilder_.getMessage(index); } } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder setSpeechContexts( int index, com.google.cloud.speech.v1p1beta1.SpeechContext value) { if (speechContextsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSpeechContextsIsMutable(); speechContexts_.set(index, value); onChanged(); } else { speechContextsBuilder_.setMessage(index, value); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder setSpeechContexts( int index, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder builderForValue) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.set(index, builderForValue.build()); onChanged(); } else { speechContextsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts(com.google.cloud.speech.v1p1beta1.SpeechContext value) { if (speechContextsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSpeechContextsIsMutable(); speechContexts_.add(value); onChanged(); } else { speechContextsBuilder_.addMessage(value); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts( int index, com.google.cloud.speech.v1p1beta1.SpeechContext value) { if (speechContextsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSpeechContextsIsMutable(); speechContexts_.add(index, value); onChanged(); } else { speechContextsBuilder_.addMessage(index, value); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts( com.google.cloud.speech.v1p1beta1.SpeechContext.Builder builderForValue) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.add(builderForValue.build()); onChanged(); } else { speechContextsBuilder_.addMessage(builderForValue.build()); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts( int index, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder builderForValue) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.add(index, builderForValue.build()); onChanged(); } else { speechContextsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder addAllSpeechContexts( java.lang.Iterable values) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_); onChanged(); } else { speechContextsBuilder_.addAllMessages(values); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder clearSpeechContexts() { if (speechContextsBuilder_ == null) { speechContexts_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000100); onChanged(); } else { speechContextsBuilder_.clear(); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public Builder removeSpeechContexts(int index) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.remove(index); onChanged(); } else { speechContextsBuilder_.remove(index); } return this; } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder getSpeechContextsBuilder( int index) { return getSpeechContextsFieldBuilder().getBuilder(index); } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder( int index) { if (speechContextsBuilder_ == null) { return speechContexts_.get(index); } else { return speechContextsBuilder_.getMessageOrBuilder(index); } } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsOrBuilderList() { if (speechContextsBuilder_ != null) { return speechContextsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(speechContexts_); } } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContextsBuilder() { return getSpeechContextsFieldBuilder() .addBuilder(com.google.cloud.speech.v1p1beta1.SpeechContext.getDefaultInstance()); } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1p1beta1.SpeechContext.Builder addSpeechContextsBuilder( int index) { return getSpeechContextsFieldBuilder() .addBuilder(index, com.google.cloud.speech.v1p1beta1.SpeechContext.getDefaultInstance()); } /** * * *
     * *Optional* array of [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
     * A means to provide context to assist the speech recognition. For more
     * information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
     * 
* * repeated .google.cloud.speech.v1p1beta1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsBuilderList() { return getSpeechContextsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< com.google.cloud.speech.v1p1beta1.SpeechContext, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder, com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder> getSpeechContextsFieldBuilder() { if (speechContextsBuilder_ == null) { speechContextsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< com.google.cloud.speech.v1p1beta1.SpeechContext, com.google.cloud.speech.v1p1beta1.SpeechContext.Builder, com.google.cloud.speech.v1p1beta1.SpeechContextOrBuilder>( speechContexts_, ((bitField0_ & 0x00000100) == 0x00000100), getParentForChildren(), isClean()); speechContexts_ = null; } return speechContextsBuilder_; } private boolean enableWordTimeOffsets_; /** * * *
     * *Optional* If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * 
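     * For example, with a `RecognitionConfig.Builder` named `builder`:
     * <pre>
     * builder.setEnableWordTimeOffsets(true);
     * </pre>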
* * bool enable_word_time_offsets = 8; */ public boolean getEnableWordTimeOffsets() { return enableWordTimeOffsets_; } /** * * *
     * *Optional* If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * 
* * bool enable_word_time_offsets = 8; */ public Builder setEnableWordTimeOffsets(boolean value) { enableWordTimeOffsets_ = value; onChanged(); return this; } /** * * *
     * *Optional* If `true`, the top result includes a list of words and
     * the start and end time offsets (timestamps) for those words. If
     * `false`, no word-level time offset information is returned. The default is
     * `false`.
     * 
* * bool enable_word_time_offsets = 8; */ public Builder clearEnableWordTimeOffsets() { enableWordTimeOffsets_ = false; onChanged(); return this; } private boolean enableWordConfidence_; /** * * *
     * *Optional* If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * 
* * bool enable_word_confidence = 15; */ public boolean getEnableWordConfidence() { return enableWordConfidence_; } /** * * *
     * *Optional* If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * 
* * bool enable_word_confidence = 15; */ public Builder setEnableWordConfidence(boolean value) { enableWordConfidence_ = value; onChanged(); return this; } /** * * *
     * *Optional* If `true`, the top result includes a list of words and the
     * confidence for those words. If `false`, no word-level confidence
     * information is returned. The default is `false`.
     * 
* * bool enable_word_confidence = 15; */ public Builder clearEnableWordConfidence() { enableWordConfidence_ = false; onChanged(); return this; } private boolean enableAutomaticPunctuation_; /** * * *
     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * Note: This is currently offered as an experimental service, complimentary
     * to all users. In the future this may be exclusively available as a
     * premium feature.
     * 
* * bool enable_automatic_punctuation = 11; */ public boolean getEnableAutomaticPunctuation() { return enableAutomaticPunctuation_; } /** * * *
     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * Note: This is currently offered as an experimental service, complimentary
     * to all users. In the future this may be exclusively available as a
     * premium feature.
     * 
* * bool enable_automatic_punctuation = 11; */ public Builder setEnableAutomaticPunctuation(boolean value) { enableAutomaticPunctuation_ = value; onChanged(); return this; } /** * * *
     * *Optional* If 'true', adds punctuation to recognition result hypotheses.
     * This feature is only available in select languages. Setting this for
     * requests in other languages has no effect at all.
     * The default 'false' value does not add punctuation to result hypotheses.
     * Note: This is currently offered as an experimental service, complimentary
     * to all users. In the future this may be exclusively available as a
     * premium feature.
     * 
* * bool enable_automatic_punctuation = 11; */ public Builder clearEnableAutomaticPunctuation() { enableAutomaticPunctuation_ = false; onChanged(); return this; } private boolean enableSpeakerDiarization_; /** * * *
     * *Optional* If 'true', enables speaker detection for each recognized word in
     * the top alternative of the recognition result using a speaker_tag provided
     * in the WordInfo.
     * Note: When this is true, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * 
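     * A sketch for a two-person phone conversation, given a
     * `RecognitionConfig.Builder` named `builder` (pairs with
     * `diarization_speaker_count` below):
     * <pre>
     * builder.setEnableSpeakerDiarization(true).setDiarizationSpeakerCount(2);
     * </pre>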
* * bool enable_speaker_diarization = 16; */ public boolean getEnableSpeakerDiarization() { return enableSpeakerDiarization_; } /** * * *
     * *Optional* If 'true', enables speaker detection for each recognized word in
     * the top alternative of the recognition result using a speaker_tag provided
     * in the WordInfo.
     * Note: When this is true, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * 
* * bool enable_speaker_diarization = 16; */ public Builder setEnableSpeakerDiarization(boolean value) { enableSpeakerDiarization_ = value; onChanged(); return this; } /** * * *
     * *Optional* If 'true', enables speaker detection for each recognized word in
     * the top alternative of the recognition result using a speaker_tag provided
     * in the WordInfo.
     * Note: When this is true, we send all the words from the beginning of the
     * audio for the top alternative in every consecutive STREAMING response.
     * This is done in order to improve our speaker tags as our models learn to
     * identify the speakers in the conversation over time.
     * For non-streaming requests, the diarization results will be provided only
     * in the top alternative of the FINAL SpeechRecognitionResult.
     * 
* * bool enable_speaker_diarization = 16; */ public Builder clearEnableSpeakerDiarization() { enableSpeakerDiarization_ = false; onChanged(); return this; } private int diarizationSpeakerCount_; /** * * *
     * *Optional*
     * If set, specifies the estimated number of speakers in the conversation.
     * If not set, defaults to '2'.
     * Ignored unless enable_speaker_diarization is set to true.
     * 
* * int32 diarization_speaker_count = 17; */ public int getDiarizationSpeakerCount() { return diarizationSpeakerCount_; } /** * * *
     * *Optional*
     * If set, specifies the estimated number of speakers in the conversation.
     * If not set, defaults to '2'.
     * Ignored unless enable_speaker_diarization is set to true.
     * 
* * int32 diarization_speaker_count = 17; */ public Builder setDiarizationSpeakerCount(int value) { diarizationSpeakerCount_ = value; onChanged(); return this; } /** * * *
     * *Optional*
     * If set, specifies the estimated number of speakers in the conversation.
     * If not set, defaults to '2'.
     * Ignored unless enable_speaker_diarization is set to true.
     * 
* * int32 diarization_speaker_count = 17; */ public Builder clearDiarizationSpeakerCount() { diarizationSpeakerCount_ = 0; onChanged(); return this; } private com.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata_ = null; private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionMetadata, com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder, com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder> metadataBuilder_; /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public boolean hasMetadata() { return metadataBuilder_ != null || metadata_ != null; } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public com.google.cloud.speech.v1p1beta1.RecognitionMetadata getMetadata() { if (metadataBuilder_ == null) { return metadata_ == null ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance() : metadata_; } else { return metadataBuilder_.getMessage(); } } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public Builder setMetadata(com.google.cloud.speech.v1p1beta1.RecognitionMetadata value) { if (metadataBuilder_ == null) { if (value == null) { throw new NullPointerException(); } metadata_ = value; onChanged(); } else { metadataBuilder_.setMessage(value); } return this; } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public Builder setMetadata( com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder builderForValue) { if (metadataBuilder_ == null) { metadata_ = builderForValue.build(); onChanged(); } else { metadataBuilder_.setMessage(builderForValue.build()); } return this; } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public Builder mergeMetadata(com.google.cloud.speech.v1p1beta1.RecognitionMetadata value) { if (metadataBuilder_ == null) { if (metadata_ != null) { metadata_ = com.google.cloud.speech.v1p1beta1.RecognitionMetadata.newBuilder(metadata_) .mergeFrom(value) .buildPartial(); } else { metadata_ = value; } onChanged(); } else { metadataBuilder_.mergeFrom(value); } return this; } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public Builder clearMetadata() { if (metadataBuilder_ == null) { metadata_ = null; onChanged(); } else { metadata_ = null; metadataBuilder_ = null; } return this; } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder getMetadataBuilder() { onChanged(); return getMetadataFieldBuilder().getBuilder(); } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ public com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder getMetadataOrBuilder() { if (metadataBuilder_ != null) { return metadataBuilder_.getMessageOrBuilder(); } else { return metadata_ == null ? com.google.cloud.speech.v1p1beta1.RecognitionMetadata.getDefaultInstance() : metadata_; } } /** * * *
     * *Optional* Metadata regarding this request.
     * 
* * .google.cloud.speech.v1p1beta1.RecognitionMetadata metadata = 9; */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionMetadata, com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder, com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder> getMetadataFieldBuilder() { if (metadataBuilder_ == null) { metadataBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.speech.v1p1beta1.RecognitionMetadata, com.google.cloud.speech.v1p1beta1.RecognitionMetadata.Builder, com.google.cloud.speech.v1p1beta1.RecognitionMetadataOrBuilder>( getMetadata(), getParentForChildren(), isClean()); metadata_ = null; } return metadataBuilder_; } private java.lang.Object model_ = ""; /** * * *
     * *Optional* Which model to select for the given request. Select the model
     * best suited to your domain to get best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     * </table>
     * 
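     * For example, with a `RecognitionConfig.Builder` named `builder`, to
     * select the phone-call model explicitly:
     * <pre>
     * builder.setModel("phone_call");
     * </pre>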
* * string model = 13; */ public java.lang.String getModel() { java.lang.Object ref = model_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); model_ = s; return s; } else { return (java.lang.String) ref; } } /** * * *
     * *Optional* Which model to select for the given request. Select the model
     * best suited to your domain to get best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     * </table>
     * 
* * string model = 13; */ public com.google.protobuf.ByteString getModelBytes() { java.lang.Object ref = model_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); model_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * *
     * *Optional* Which model to select for the given request. Select the model
     * best suited to your domain to get best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     * </table>
     * 
* * string model = 13; */ public Builder setModel(java.lang.String value) { if (value == null) { throw new NullPointerException(); } model_ = value; onChanged(); return this; } /** * * *
     * *Optional* Which model to select for the given request. Select the model
     * best suited to your domain to get best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     * </table>
     * 
* * string model = 13; */ public Builder clearModel() { model_ = getDefaultInstance().getModel(); onChanged(); return this; } /** * * *
     * *Optional* Which model to select for the given request. Select the model
     * best suited to your domain to get best results. If a model is not
     * explicitly specified, then we auto-select a model based on the parameters
     * in the RecognitionConfig.
     * <table>
     *   <tr>
     *     <td><b>Model</b></td>
     *     <td><b>Description</b></td>
     *   </tr>
     *   <tr>
     *     <td><code>command_and_search</code></td>
     *     <td>Best for short queries such as voice commands or voice search.</td>
     *   </tr>
     *   <tr>
     *     <td><code>phone_call</code></td>
     *     <td>Best for audio that originated from a phone call (typically
     *     recorded at an 8khz sampling rate).</td>
     *   </tr>
     *   <tr>
     *     <td><code>video</code></td>
     *     <td>Best for audio that originated from video or includes multiple
     *         speakers. Ideally the audio is recorded at a 16khz or greater
     *         sampling rate. This is a premium model that costs more than the
     *         standard rate.</td>
     *   </tr>
     *   <tr>
     *     <td><code>default</code></td>
     *     <td>Best for audio that is not one of the specific audio models.
     *         For example, long-form audio. Ideally the audio is high-fidelity,
     *         recorded at a 16khz or greater sampling rate.</td>
     *   </tr>
     * </table>
     * 
* * string model = 13; */ public Builder setModelBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); model_ = value; onChanged(); return this; } private boolean useEnhanced_; /** * * *
     * *Optional* Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if:
     * 1. project is eligible for requesting enhanced models
     * 2. an enhanced model exists for the audio
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * Enhanced speech models require that you opt-in to data logging using
     * instructions in the
     * [documentation](/speech-to-text/docs/enable-data-logging). If you set
     * `use_enhanced` to true and you have not enabled audio logging, then you
     * will receive an error.
     * 
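     * A sketch, assuming the project has already opted in to data logging as
     * described above, with a `RecognitionConfig.Builder` named `builder`:
     * <pre>
     * builder.setUseEnhanced(true).setModel("phone_call");
     * </pre>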
* * bool use_enhanced = 14; */ public boolean getUseEnhanced() { return useEnhanced_; } /** * * *
     * *Optional* Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if:
     * 1. project is eligible for requesting enhanced models
     * 2. an enhanced model exists for the audio
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * Enhanced speech models require that you opt-in to data logging using
     * instructions in the
     * [documentation](/speech-to-text/docs/enable-data-logging). If you set
     * `use_enhanced` to true and you have not enabled audio logging, then you
     * will receive an error.
     * 
* * bool use_enhanced = 14; */ public Builder setUseEnhanced(boolean value) { useEnhanced_ = value; onChanged(); return this; } /** * * *
     * *Optional* Set to true to use an enhanced model for speech recognition.
     * If `use_enhanced` is set to true and the `model` field is not set, then
     * an appropriate enhanced model is chosen if:
     * 1. project is eligible for requesting enhanced models
     * 2. an enhanced model exists for the audio
     * If `use_enhanced` is true and an enhanced version of the specified model
     * does not exist, then the speech is recognized using the standard version
     * of the specified model.
     * Enhanced speech models require that you opt-in to data logging using
     * instructions in the
     * [documentation](/speech-to-text/docs/enable-data-logging). If you set
     * `use_enhanced` to true and you have not enabled audio logging, then you
     * will receive an error.
     * 
     *
     * bool use_enhanced = 14;
     */
    public Builder clearUseEnhanced() {
      useEnhanced_ = false;
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFieldsProto3(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1p1beta1.RecognitionConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.RecognitionConfig)
  private static final com.google.cloud.speech.v1p1beta1.RecognitionConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.speech.v1p1beta1.RecognitionConfig();
  }

  public static com.google.cloud.speech.v1p1beta1.RecognitionConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<RecognitionConfig> PARSER =
      new com.google.protobuf.AbstractParser<RecognitionConfig>() {
        @java.lang.Override
        public RecognitionConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new RecognitionConfig(input, extensionRegistry);
        }
      };

  public static com.google.protobuf.Parser<RecognitionConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<RecognitionConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.speech.v1p1beta1.RecognitionConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
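
// Usage sketch, assuming the google-cloud-speech client library is on the
// classpath. SpeechClient and RecognitionAudio are the companion classes
// generated alongside this message, and "audio.raw" is a placeholder path for
// 16 kHz LINEAR16 content; treat this as an illustration, not canonical usage.
//
//   try (com.google.cloud.speech.v1p1beta1.SpeechClient client =
//       com.google.cloud.speech.v1p1beta1.SpeechClient.create()) {
//     RecognitionConfig config =
//         RecognitionConfig.newBuilder()
//             .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
//             .setSampleRateHertz(16000)
//             .setLanguageCode("en-US")
//             .setEnableAutomaticPunctuation(true)
//             .build();
//     com.google.cloud.speech.v1p1beta1.RecognitionAudio audio =
//         com.google.cloud.speech.v1p1beta1.RecognitionAudio.newBuilder()
//             .setContent(
//                 com.google.protobuf.ByteString.copyFrom(
//                     java.nio.file.Files.readAllBytes(
//                         java.nio.file.Paths.get("audio.raw"))))
//             .build();
//     com.google.cloud.speech.v1p1beta1.RecognizeResponse response =
//         client.recognize(config, audio);
//     for (com.google.cloud.speech.v1p1beta1.SpeechRecognitionResult result :
//         response.getResultsList()) {
//       System.out.println(result.getAlternatives(0).getTranscript());
//     }
//   }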