// NOTE(review): The banner text below was scraped from a Maven-repository web page and is
// not part of the original generated source; it is preserved as a comment so the file
// remains valid Java. The authoritative source is the protoc output for
// google/cloud/dialogflow/v2/audio_config.proto — regenerate rather than hand-edit.
//
// All Downloads are FREE. Search and download functionalities are using the official Maven repository.
//
// com.google.cloud.dialogflow.v2.SpeechToTextConfig Maven / Gradle / Ivy
//
// There is a newer version: 4.59.0
// Show newest version
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/dialogflow/v2/audio_config.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.dialogflow.v2;

/**
 *
 *
 * 
 * Configures speech transcription for
 * [ConversationProfile][google.cloud.dialogflow.v2.ConversationProfile].
 * 
* * Protobuf type {@code google.cloud.dialogflow.v2.SpeechToTextConfig} */ public final class SpeechToTextConfig extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.dialogflow.v2.SpeechToTextConfig) SpeechToTextConfigOrBuilder { private static final long serialVersionUID = 0L; // Use SpeechToTextConfig.newBuilder() to construct. private SpeechToTextConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private SpeechToTextConfig() { speechModelVariant_ = 0; model_ = ""; audioEncoding_ = 0; languageCode_ = ""; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new SpeechToTextConfig(); } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.dialogflow.v2.AudioConfigProto .internal_static_google_cloud_dialogflow_v2_SpeechToTextConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.dialogflow.v2.AudioConfigProto .internal_static_google_cloud_dialogflow_v2_SpeechToTextConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.dialogflow.v2.SpeechToTextConfig.class, com.google.cloud.dialogflow.v2.SpeechToTextConfig.Builder.class); } public static final int SPEECH_MODEL_VARIANT_FIELD_NUMBER = 1; private int speechModelVariant_ = 0; /** * * *
   * The speech model used in speech to text.
   * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
   * `USE_ENHANCED`. It can be overridden in
   * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
   * and
   * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
   * request. If enhanced model variant is specified and an enhanced version of
   * the specified model for the language does not exist, then it would emit an
   * error.
   * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @return The enum numeric value on the wire for speechModelVariant. */ @java.lang.Override public int getSpeechModelVariantValue() { return speechModelVariant_; } /** * * *
   * The speech model used in speech to text.
   * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
   * `USE_ENHANCED`. It can be overridden in
   * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
   * and
   * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
   * request. If enhanced model variant is specified and an enhanced version of
   * the specified model for the language does not exist, then it would emit an
   * error.
   * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @return The speechModelVariant. */ @java.lang.Override public com.google.cloud.dialogflow.v2.SpeechModelVariant getSpeechModelVariant() { com.google.cloud.dialogflow.v2.SpeechModelVariant result = com.google.cloud.dialogflow.v2.SpeechModelVariant.forNumber(speechModelVariant_); return result == null ? com.google.cloud.dialogflow.v2.SpeechModelVariant.UNRECOGNIZED : result; } public static final int MODEL_FIELD_NUMBER = 2; @SuppressWarnings("serial") private volatile java.lang.Object model_ = ""; /** * * *
   * Which Speech model to select. Select the
   * model best suited to your domain to get best results. If a model is not
   * explicitly specified, then Dialogflow auto-selects a model based on other
   * parameters in the SpeechToTextConfig and Agent settings.
   * If enhanced speech model is enabled for the agent and an enhanced
   * version of the specified model for the language does not exist, then the
   * speech is recognized using the standard version of the specified model.
   * Refer to
   * [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
   * for more details.
   * If you specify a model, the following models typically have the best
   * performance:
   *
   * - phone_call (best for Agent Assist and telephony)
   * - latest_short (best for Dialogflow non-telephony)
   * - command_and_search
   *
   * Leave this field unspecified to use
   * [Agent Speech
   * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
   * for model selection.
   * 
* * string model = 2; * * @return The model. */ @java.lang.Override public java.lang.String getModel() { java.lang.Object ref = model_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); model_ = s; return s; } } /** * * *
   * Which Speech model to select. Select the
   * model best suited to your domain to get best results. If a model is not
   * explicitly specified, then Dialogflow auto-selects a model based on other
   * parameters in the SpeechToTextConfig and Agent settings.
   * If enhanced speech model is enabled for the agent and an enhanced
   * version of the specified model for the language does not exist, then the
   * speech is recognized using the standard version of the specified model.
   * Refer to
   * [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
   * for more details.
   * If you specify a model, the following models typically have the best
   * performance:
   *
   * - phone_call (best for Agent Assist and telephony)
   * - latest_short (best for Dialogflow non-telephony)
   * - command_and_search
   *
   * Leave this field unspecified to use
   * [Agent Speech
   * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
   * for model selection.
   * 
* * string model = 2; * * @return The bytes for model. */ @java.lang.Override public com.google.protobuf.ByteString getModelBytes() { java.lang.Object ref = model_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); model_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int AUDIO_ENCODING_FIELD_NUMBER = 6; private int audioEncoding_ = 0; /** * * *
   * Audio encoding of the audio content to process.
   * 
* * .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6; * * @return The enum numeric value on the wire for audioEncoding. */ @java.lang.Override public int getAudioEncodingValue() { return audioEncoding_; } /** * * *
   * Audio encoding of the audio content to process.
   * 
* * .google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6; * * @return The audioEncoding. */ @java.lang.Override public com.google.cloud.dialogflow.v2.AudioEncoding getAudioEncoding() { com.google.cloud.dialogflow.v2.AudioEncoding result = com.google.cloud.dialogflow.v2.AudioEncoding.forNumber(audioEncoding_); return result == null ? com.google.cloud.dialogflow.v2.AudioEncoding.UNRECOGNIZED : result; } public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 7; private int sampleRateHertz_ = 0; /** * * *
   * Sample rate (in Hertz) of the audio content sent in the query.
   * Refer to [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
   * more details.
   * 
* * int32 sample_rate_hertz = 7; * * @return The sampleRateHertz. */ @java.lang.Override public int getSampleRateHertz() { return sampleRateHertz_; } public static final int LANGUAGE_CODE_FIELD_NUMBER = 8; @SuppressWarnings("serial") private volatile java.lang.Object languageCode_ = ""; /** * * *
   * The language of the supplied audio. Dialogflow does not do
   * translations. See [Language
   * Support](https://cloud.google.com/dialogflow/docs/reference/language)
   * for a list of the currently supported language codes. Note that queries in
   * the same session do not necessarily need to specify the same language.
   * 
* * string language_code = 8; * * @return The languageCode. */ @java.lang.Override public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); languageCode_ = s; return s; } } /** * * *
   * The language of the supplied audio. Dialogflow does not do
   * translations. See [Language
   * Support](https://cloud.google.com/dialogflow/docs/reference/language)
   * for a list of the currently supported language codes. Note that queries in
   * the same session do not necessarily need to specify the same language.
   * 
* * string language_code = 8; * * @return The bytes for languageCode. */ @java.lang.Override public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); languageCode_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int ENABLE_WORD_INFO_FIELD_NUMBER = 9; private boolean enableWordInfo_ = false; /** * * *
   * If `true`, Dialogflow returns
   * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
   * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
   * with information about the recognized speech words, e.g. start and end time
   * offsets. If false or unspecified, Speech doesn't return any word-level
   * information.
   * 
* * bool enable_word_info = 9; * * @return The enableWordInfo. */ @java.lang.Override public boolean getEnableWordInfo() { return enableWordInfo_; } public static final int USE_TIMEOUT_BASED_ENDPOINTING_FIELD_NUMBER = 11; private boolean useTimeoutBasedEndpointing_ = false; /** * * *
   * Use timeout based endpointing, interpreting endpointer sensitivity as
   * seconds of timeout value.
   * 
* * bool use_timeout_based_endpointing = 11; * * @return The useTimeoutBasedEndpointing. */ @java.lang.Override public boolean getUseTimeoutBasedEndpointing() { return useTimeoutBasedEndpointing_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (speechModelVariant_ != com.google.cloud.dialogflow.v2.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED .getNumber()) { output.writeEnum(1, speechModelVariant_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, model_); } if (audioEncoding_ != com.google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) { output.writeEnum(6, audioEncoding_); } if (sampleRateHertz_ != 0) { output.writeInt32(7, sampleRateHertz_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 8, languageCode_); } if (enableWordInfo_ != false) { output.writeBool(9, enableWordInfo_); } if (useTimeoutBasedEndpointing_ != false) { output.writeBool(11, useTimeoutBasedEndpointing_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (speechModelVariant_ != com.google.cloud.dialogflow.v2.SpeechModelVariant.SPEECH_MODEL_VARIANT_UNSPECIFIED .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, speechModelVariant_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, model_); } if (audioEncoding_ != 
com.google.cloud.dialogflow.v2.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED.getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(6, audioEncoding_); } if (sampleRateHertz_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(7, sampleRateHertz_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, languageCode_); } if (enableWordInfo_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(9, enableWordInfo_); } if (useTimeoutBasedEndpointing_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(11, useTimeoutBasedEndpointing_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.dialogflow.v2.SpeechToTextConfig)) { return super.equals(obj); } com.google.cloud.dialogflow.v2.SpeechToTextConfig other = (com.google.cloud.dialogflow.v2.SpeechToTextConfig) obj; if (speechModelVariant_ != other.speechModelVariant_) return false; if (!getModel().equals(other.getModel())) return false; if (audioEncoding_ != other.audioEncoding_) return false; if (getSampleRateHertz() != other.getSampleRateHertz()) return false; if (!getLanguageCode().equals(other.getLanguageCode())) return false; if (getEnableWordInfo() != other.getEnableWordInfo()) return false; if (getUseTimeoutBasedEndpointing() != other.getUseTimeoutBasedEndpointing()) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + SPEECH_MODEL_VARIANT_FIELD_NUMBER; hash = (53 * hash) + speechModelVariant_; hash = (37 * hash) + MODEL_FIELD_NUMBER; hash = (53 
* hash) + getModel().hashCode(); hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER; hash = (53 * hash) + audioEncoding_; hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER; hash = (53 * hash) + getSampleRateHertz(); hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER; hash = (53 * hash) + getLanguageCode().hashCode(); hash = (37 * hash) + ENABLE_WORD_INFO_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordInfo()); hash = (37 * hash) + USE_TIMEOUT_BASED_ENDPOINTING_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getUseTimeoutBasedEndpointing()); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.dialogflow.v2.SpeechToTextConfig parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.cloud.dialogflow.v2.SpeechToTextConfig prototype) { return 
DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * *
   * Configures speech transcription for
   * [ConversationProfile][google.cloud.dialogflow.v2.ConversationProfile].
   * 
* * Protobuf type {@code google.cloud.dialogflow.v2.SpeechToTextConfig} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.dialogflow.v2.SpeechToTextConfig) com.google.cloud.dialogflow.v2.SpeechToTextConfigOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.dialogflow.v2.AudioConfigProto .internal_static_google_cloud_dialogflow_v2_SpeechToTextConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.dialogflow.v2.AudioConfigProto .internal_static_google_cloud_dialogflow_v2_SpeechToTextConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.dialogflow.v2.SpeechToTextConfig.class, com.google.cloud.dialogflow.v2.SpeechToTextConfig.Builder.class); } // Construct using com.google.cloud.dialogflow.v2.SpeechToTextConfig.newBuilder() private Builder() {} private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; speechModelVariant_ = 0; model_ = ""; audioEncoding_ = 0; sampleRateHertz_ = 0; languageCode_ = ""; enableWordInfo_ = false; useTimeoutBasedEndpointing_ = false; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.dialogflow.v2.AudioConfigProto .internal_static_google_cloud_dialogflow_v2_SpeechToTextConfig_descriptor; } @java.lang.Override public com.google.cloud.dialogflow.v2.SpeechToTextConfig getDefaultInstanceForType() { return com.google.cloud.dialogflow.v2.SpeechToTextConfig.getDefaultInstance(); } @java.lang.Override public com.google.cloud.dialogflow.v2.SpeechToTextConfig build() { com.google.cloud.dialogflow.v2.SpeechToTextConfig result = buildPartial(); if 
(!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.dialogflow.v2.SpeechToTextConfig buildPartial() { com.google.cloud.dialogflow.v2.SpeechToTextConfig result = new com.google.cloud.dialogflow.v2.SpeechToTextConfig(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(com.google.cloud.dialogflow.v2.SpeechToTextConfig result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { result.speechModelVariant_ = speechModelVariant_; } if (((from_bitField0_ & 0x00000002) != 0)) { result.model_ = model_; } if (((from_bitField0_ & 0x00000004) != 0)) { result.audioEncoding_ = audioEncoding_; } if (((from_bitField0_ & 0x00000008) != 0)) { result.sampleRateHertz_ = sampleRateHertz_; } if (((from_bitField0_ & 0x00000010) != 0)) { result.languageCode_ = languageCode_; } if (((from_bitField0_ & 0x00000020) != 0)) { result.enableWordInfo_ = enableWordInfo_; } if (((from_bitField0_ & 0x00000040) != 0)) { result.useTimeoutBasedEndpointing_ = useTimeoutBasedEndpointing_; } } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return 
super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.dialogflow.v2.SpeechToTextConfig) { return mergeFrom((com.google.cloud.dialogflow.v2.SpeechToTextConfig) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.dialogflow.v2.SpeechToTextConfig other) { if (other == com.google.cloud.dialogflow.v2.SpeechToTextConfig.getDefaultInstance()) return this; if (other.speechModelVariant_ != 0) { setSpeechModelVariantValue(other.getSpeechModelVariantValue()); } if (!other.getModel().isEmpty()) { model_ = other.model_; bitField0_ |= 0x00000002; onChanged(); } if (other.audioEncoding_ != 0) { setAudioEncodingValue(other.getAudioEncodingValue()); } if (other.getSampleRateHertz() != 0) { setSampleRateHertz(other.getSampleRateHertz()); } if (!other.getLanguageCode().isEmpty()) { languageCode_ = other.languageCode_; bitField0_ |= 0x00000010; onChanged(); } if (other.getEnableWordInfo() != false) { setEnableWordInfo(other.getEnableWordInfo()); } if (other.getUseTimeoutBasedEndpointing() != false) { setUseTimeoutBasedEndpointing(other.getUseTimeoutBasedEndpointing()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { speechModelVariant_ = input.readEnum(); bitField0_ |= 0x00000001; break; } // case 8 case 18: { model_ = input.readStringRequireUtf8(); bitField0_ |= 0x00000002; break; } // case 18 case 48: { audioEncoding_ = input.readEnum(); bitField0_ |= 
0x00000004; break; } // case 48 case 56: { sampleRateHertz_ = input.readInt32(); bitField0_ |= 0x00000008; break; } // case 56 case 66: { languageCode_ = input.readStringRequireUtf8(); bitField0_ |= 0x00000010; break; } // case 66 case 72: { enableWordInfo_ = input.readBool(); bitField0_ |= 0x00000020; break; } // case 72 case 88: { useTimeoutBasedEndpointing_ = input.readBool(); bitField0_ |= 0x00000040; break; } // case 88 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int speechModelVariant_ = 0; /** * * *
     * The speech model used in speech to text.
     * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
     * `USE_ENHANCED`. It can be overridden in
     * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
     * and
     * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
     * request. If enhanced model variant is specified and an enhanced version of
     * the specified model for the language does not exist, then it would emit an
     * error.
     * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @return The enum numeric value on the wire for speechModelVariant. */ @java.lang.Override public int getSpeechModelVariantValue() { return speechModelVariant_; } /** * * *
     * The speech model used in speech to text.
     * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
     * `USE_ENHANCED`. It can be overridden in
     * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
     * and
     * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
     * request. If enhanced model variant is specified and an enhanced version of
     * the specified model for the language does not exist, then it would emit an
     * error.
     * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @param value The enum numeric value on the wire for speechModelVariant to set. * @return This builder for chaining. */ public Builder setSpeechModelVariantValue(int value) { speechModelVariant_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * * *
     * The speech model used in speech to text.
     * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
     * `USE_ENHANCED`. It can be overridden in
     * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
     * and
     * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
     * request. If enhanced model variant is specified and an enhanced version of
     * the specified model for the language does not exist, then it would emit an
     * error.
     * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @return The speechModelVariant. */ @java.lang.Override public com.google.cloud.dialogflow.v2.SpeechModelVariant getSpeechModelVariant() { com.google.cloud.dialogflow.v2.SpeechModelVariant result = com.google.cloud.dialogflow.v2.SpeechModelVariant.forNumber(speechModelVariant_); return result == null ? com.google.cloud.dialogflow.v2.SpeechModelVariant.UNRECOGNIZED : result; } /** * * *
     * The speech model used in speech to text.
     * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
     * `USE_ENHANCED`. It can be overridden in
     * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
     * and
     * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
     * request. If enhanced model variant is specified and an enhanced version of
     * the specified model for the language does not exist, then it would emit an
     * error.
     * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @param value The speechModelVariant to set. * @return This builder for chaining. */ public Builder setSpeechModelVariant(com.google.cloud.dialogflow.v2.SpeechModelVariant value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; speechModelVariant_ = value.getNumber(); onChanged(); return this; } /** * * *
     * The speech model used in speech to text.
     * `SPEECH_MODEL_VARIANT_UNSPECIFIED`, `USE_BEST_AVAILABLE` will be treated as
     * `USE_ENHANCED`. It can be overridden in
     * [AnalyzeContentRequest][google.cloud.dialogflow.v2.AnalyzeContentRequest]
     * and
     * [StreamingAnalyzeContentRequest][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest]
     * request. If enhanced model variant is specified and an enhanced version of
     * the specified model for the language does not exist, then it would emit an
     * error.
     * 
* * .google.cloud.dialogflow.v2.SpeechModelVariant speech_model_variant = 1; * * @return This builder for chaining. */ public Builder clearSpeechModelVariant() { bitField0_ = (bitField0_ & ~0x00000001); speechModelVariant_ = 0; onChanged(); return this; } private java.lang.Object model_ = ""; /** * * *
     * Which Speech model to select. Select the
     * model best suited to your domain to get best results. If a model is not
     * explicitly specified, then Dialogflow auto-selects a model based on other
     * parameters in the SpeechToTextConfig and Agent settings.
     * If enhanced speech model is enabled for the agent and an enhanced
     * version of the specified model for the language does not exist, then the
     * speech is recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     *
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search
     *
     * Leave this field unspecified to use
     * [Agent Speech
     * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
     * for model selection.
     * </pre>
     *
     * <code>string model = 2;</code>
     *
     * @return The model.
     */
    public java.lang.String getModel() {
      java.lang.Object ref = model_;
      if (!(ref instanceof java.lang.String)) {
        // Field is still cached as wire bytes; decode as UTF-8 once and memoize the String.
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        model_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select. Select the
     * model best suited to your domain to get best results. If a model is not
     * explicitly specified, then Dialogflow auto-selects a model based on other
     * parameters in the SpeechToTextConfig and Agent settings.
     * If enhanced speech model is enabled for the agent and an enhanced
     * version of the specified model for the language does not exist, then the
     * speech is recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     *
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search
     *
     * Leave this field unspecified to use
     * [Agent Speech
     * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
     * for model selection.
     * </pre>
     *
     * <code>string model = 2;</code>
     *
     * @return The bytes for model.
     */
    public com.google.protobuf.ByteString getModelBytes() {
      java.lang.Object ref = model_;
      if (ref instanceof String) {
        // Cached as a String; encode as UTF-8 once and memoize the ByteString.
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        model_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select. Select the
     * model best suited to your domain to get best results. If a model is not
     * explicitly specified, then Dialogflow auto-selects a model based on other
     * parameters in the SpeechToTextConfig and Agent settings.
     * If enhanced speech model is enabled for the agent and an enhanced
     * version of the specified model for the language does not exist, then the
     * speech is recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     *
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search
     *
     * Leave this field unspecified to use
     * [Agent Speech
     * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
     * for model selection.
     * </pre>
     *
     * <code>string model = 2;</code>
     *
     * @param value The model to set.
     * @return This builder for chaining.
     */
    public Builder setModel(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      model_ = value;
      // Mark field 2 as explicitly set.
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select. Select the
     * model best suited to your domain to get best results. If a model is not
     * explicitly specified, then Dialogflow auto-selects a model based on other
     * parameters in the SpeechToTextConfig and Agent settings.
     * If enhanced speech model is enabled for the agent and an enhanced
     * version of the specified model for the language does not exist, then the
     * speech is recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     *
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search
     *
     * Leave this field unspecified to use
     * [Agent Speech
     * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
     * for model selection.
     * </pre>
     *
     * <code>string model = 2;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModel() {
      // Restore the default value (empty string) and clear the has-bit for field 2.
      model_ = getDefaultInstance().getModel();
      bitField0_ = (bitField0_ & ~0x00000002);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Which Speech model to select. Select the
     * model best suited to your domain to get best results. If a model is not
     * explicitly specified, then Dialogflow auto-selects a model based on other
     * parameters in the SpeechToTextConfig and Agent settings.
     * If enhanced speech model is enabled for the agent and an enhanced
     * version of the specified model for the language does not exist, then the
     * speech is recognized using the standard version of the specified model.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#select-model)
     * for more details.
     * If you specify a model, the following models typically have the best
     * performance:
     *
     * - phone_call (best for Agent Assist and telephony)
     * - latest_short (best for Dialogflow non-telephony)
     * - command_and_search
     *
     * Leave this field unspecified to use
     * [Agent Speech
     * settings](https://cloud.google.com/dialogflow/cx/docs/concept/agent#settings-speech)
     * for model selection.
     * </pre>
     *
     * <code>string model = 2;</code>
     *
     * @param value The bytes for model to set.
     * @return This builder for chaining.
     */
    public Builder setModelBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      // Proto3 strings must be valid UTF-8; reject invalid bytes up front.
      checkByteStringIsUtf8(value);
      model_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }

    // Backing storage for `audio_encoding` (field 6), kept as the raw wire number.
    private int audioEncoding_ = 0;
    /**
     *
     *
     * <pre>
     * Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6;</code>
     *
     * @return The enum numeric value on the wire for audioEncoding.
     */
    @java.lang.Override
    public int getAudioEncodingValue() {
      return audioEncoding_;
    }
    /**
     *
     *
     * <pre>
     * Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6;</code>
     *
     * @param value The enum numeric value on the wire for audioEncoding to set.
     * @return This builder for chaining.
     */
    public Builder setAudioEncodingValue(int value) {
      // Stores the raw wire number without validating against known enum constants.
      audioEncoding_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6;</code>
     *
     * @return The audioEncoding.
     */
    @java.lang.Override
    public com.google.cloud.dialogflow.v2.AudioEncoding getAudioEncoding() {
      com.google.cloud.dialogflow.v2.AudioEncoding result =
          com.google.cloud.dialogflow.v2.AudioEncoding.forNumber(audioEncoding_);
      // Unknown wire values map to UNRECOGNIZED rather than null.
      return result == null ? com.google.cloud.dialogflow.v2.AudioEncoding.UNRECOGNIZED : result;
    }
    /**
     *
     *
     * <pre>
     * Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6;</code>
     *
     * @param value The audioEncoding to set.
     * @return This builder for chaining.
     */
    public Builder setAudioEncoding(com.google.cloud.dialogflow.v2.AudioEncoding value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000004;
      audioEncoding_ = value.getNumber();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Audio encoding of the audio content to process.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.v2.AudioEncoding audio_encoding = 6;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearAudioEncoding() {
      bitField0_ = (bitField0_ & ~0x00000004);
      audioEncoding_ = 0;
      onChanged();
      return this;
    }

    // Backing storage for `sample_rate_hertz` (field 7).
    private int sampleRateHertz_;
    /**
     *
     *
     * <pre>
     * Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 7;</code>
     *
     * @return The sampleRateHertz.
     */
    @java.lang.Override
    public int getSampleRateHertz() {
      return sampleRateHertz_;
    }
    /**
     *
     *
     * <pre>
     * Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 7;</code>
     *
     * @param value The sampleRateHertz to set.
     * @return This builder for chaining.
     */
    public Builder setSampleRateHertz(int value) {
      sampleRateHertz_ = value;
      // Mark field 7 as explicitly set.
      bitField0_ |= 0x00000008;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 7;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSampleRateHertz() {
      bitField0_ = (bitField0_ & ~0x00000008);
      sampleRateHertz_ = 0;
      onChanged();
      return this;
    }

    // Backing storage for `language_code` (field 8). Holds either a String or a
    // ByteString; lazily converted and cached as a String on first string access.
    private java.lang.Object languageCode_ = "";
    /**
     *
     *
     * <pre>
     * The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 8;</code>
     *
     * @return The languageCode.
     */
    public java.lang.String getLanguageCode() {
      java.lang.Object ref = languageCode_;
      if (!(ref instanceof java.lang.String)) {
        // Field is still cached as wire bytes; decode as UTF-8 once and memoize the String.
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        languageCode_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 8;</code>
     *
     * @return The bytes for languageCode.
     */
    public com.google.protobuf.ByteString getLanguageCodeBytes() {
      java.lang.Object ref = languageCode_;
      if (ref instanceof String) {
        // Cached as a String; encode as UTF-8 once and memoize the ByteString.
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        languageCode_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 8;</code>
     *
     * @param value The languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCode(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      languageCode_ = value;
      // Mark field 8 as explicitly set.
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 8;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearLanguageCode() {
      // Restore the default value (empty string) and clear the has-bit for field 8.
      languageCode_ = getDefaultInstance().getLanguageCode();
      bitField0_ = (bitField0_ & ~0x00000010);
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * The language of the supplied audio. Dialogflow does not do
     * translations. See [Language
     * Support](https://cloud.google.com/dialogflow/docs/reference/language)
     * for a list of the currently supported language codes. Note that queries in
     * the same session do not necessarily need to specify the same language.
     * </pre>
     *
     * <code>string language_code = 8;</code>
     *
     * @param value The bytes for languageCode to set.
     * @return This builder for chaining.
     */
    public Builder setLanguageCodeBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      // Proto3 strings must be valid UTF-8; reject invalid bytes up front.
      checkByteStringIsUtf8(value);
      languageCode_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }

    // Backing storage for `enable_word_info` (field 9).
    private boolean enableWordInfo_;
    /**
     *
     *
     * <pre>
     * If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 9;</code>
     *
     * @return The enableWordInfo.
     */
    @java.lang.Override
    public boolean getEnableWordInfo() {
      return enableWordInfo_;
    }
    /**
     *
     *
     * <pre>
     * If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 9;</code>
     *
     * @param value The enableWordInfo to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordInfo(boolean value) {
      enableWordInfo_ = value;
      // Mark field 9 as explicitly set.
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.v2.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.v2.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 9;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordInfo() {
      bitField0_ = (bitField0_ & ~0x00000020);
      enableWordInfo_ = false;
      onChanged();
      return this;
    }

    // Backing storage for `use_timeout_based_endpointing` (field 11).
    private boolean useTimeoutBasedEndpointing_;
    /**
     *
     *
     * <pre>
     * Use timeout based endpointing, interpreting endpointer sensitivity as
     * seconds of timeout value.
     * </pre>
     *
     * <code>bool use_timeout_based_endpointing = 11;</code>
     *
     * @return The useTimeoutBasedEndpointing.
     */
    @java.lang.Override
    public boolean getUseTimeoutBasedEndpointing() {
      return useTimeoutBasedEndpointing_;
    }
    /**
     *
     *
     * <pre>
     * Use timeout based endpointing, interpreting endpointer sensitivity as
     * seconds of timeout value.
     * </pre>
     *
     * <code>bool use_timeout_based_endpointing = 11;</code>
     *
     * @param value The useTimeoutBasedEndpointing to set.
     * @return This builder for chaining.
     */
    public Builder setUseTimeoutBasedEndpointing(boolean value) {
      useTimeoutBasedEndpointing_ = value;
      // Mark field 11 as explicitly set.
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Use timeout based endpointing, interpreting endpointer sensitivity as
     * seconds of timeout value.
     * </pre>
     *
     * <code>bool use_timeout_based_endpointing = 11;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearUseTimeoutBasedEndpointing() {
      bitField0_ = (bitField0_ & ~0x00000040);
      useTimeoutBasedEndpointing_ = false;
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.dialogflow.v2.SpeechToTextConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.SpeechToTextConfig)
  private static final com.google.cloud.dialogflow.v2.SpeechToTextConfig DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.dialogflow.v2.SpeechToTextConfig();
  }

  public static com.google.cloud.dialogflow.v2.SpeechToTextConfig getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  // NOTE(review): the type arguments below were stripped by the HTML extraction;
  // restored to the standard protoc output (Parser<SpeechToTextConfig>).
  private static final com.google.protobuf.Parser<SpeechToTextConfig> PARSER =
      new com.google.protobuf.AbstractParser<SpeechToTextConfig>() {
        @java.lang.Override
        public SpeechToTextConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            // Attach whatever was parsed so far so callers can inspect the partial message.
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (com.google.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            // Wrap plain I/O failures as protocol-buffer parse failures, preserving the cause.
            throw new com.google.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

  public static com.google.protobuf.Parser<SpeechToTextConfig> parser() {
    return PARSER;
  }
  @java.lang.Override
  public com.google.protobuf.Parser<SpeechToTextConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.dialogflow.v2.SpeechToTextConfig getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy