All downloads are free. Search and download functionality uses the official Maven repository.

com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig — available via Maven, Gradle, or Ivy

This is the newest available version.
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/dialogflow/cx/v3beta1/audio_config.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.dialogflow.cx.v3beta1;

/**
 *
 *
 * 
 * Instructs the speech recognizer on how to process the audio content.
 * 
* * Protobuf type {@code google.cloud.dialogflow.cx.v3beta1.InputAudioConfig} */ public final class InputAudioConfig extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) InputAudioConfigOrBuilder { private static final long serialVersionUID = 0L; // Use InputAudioConfig.newBuilder() to construct. private InputAudioConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } private InputAudioConfig() { audioEncoding_ = 0; phraseHints_ = com.google.protobuf.LazyStringArrayList.emptyList(); model_ = ""; modelVariant_ = 0; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new InputAudioConfig(); } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.dialogflow.cx.v3beta1.AudioConfigProto .internal_static_google_cloud_dialogflow_cx_v3beta1_InputAudioConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.dialogflow.cx.v3beta1.AudioConfigProto .internal_static_google_cloud_dialogflow_cx_v3beta1_InputAudioConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.class, com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.Builder.class); } private int bitField0_; public static final int AUDIO_ENCODING_FIELD_NUMBER = 1; private int audioEncoding_ = 0; /** * * *
   * Required. Audio encoding of the audio content to process.
   * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @return The enum numeric value on the wire for audioEncoding. */ @java.lang.Override public int getAudioEncodingValue() { return audioEncoding_; } /** * * *
   * Required. Audio encoding of the audio content to process.
   * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @return The audioEncoding. */ @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding getAudioEncoding() { com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding result = com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding.forNumber(audioEncoding_); return result == null ? com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding.UNRECOGNIZED : result; } public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2; private int sampleRateHertz_ = 0; /** * * *
   * Sample rate (in Hertz) of the audio content sent in the query.
   * Refer to
   * [Cloud Speech API
   * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
   * more details.
   * 
* * int32 sample_rate_hertz = 2; * * @return The sampleRateHertz. */ @java.lang.Override public int getSampleRateHertz() { return sampleRateHertz_; } public static final int ENABLE_WORD_INFO_FIELD_NUMBER = 13; private boolean enableWordInfo_ = false; /** * * *
   * Optional. If `true`, Dialogflow returns
   * [SpeechWordInfo][google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo] in
   * [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
   * with information about the recognized speech words, e.g. start and end time
   * offsets. If false or unspecified, Speech doesn't return any word-level
   * information.
   * 
* * bool enable_word_info = 13; * * @return The enableWordInfo. */ @java.lang.Override public boolean getEnableWordInfo() { return enableWordInfo_; } public static final int PHRASE_HINTS_FIELD_NUMBER = 4; @SuppressWarnings("serial") private com.google.protobuf.LazyStringArrayList phraseHints_ = com.google.protobuf.LazyStringArrayList.emptyList(); /** * * *
   * Optional. A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   *
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * 
* * repeated string phrase_hints = 4; * * @return A list containing the phraseHints. */ public com.google.protobuf.ProtocolStringList getPhraseHintsList() { return phraseHints_; } /** * * *
   * Optional. A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   *
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * 
* * repeated string phrase_hints = 4; * * @return The count of phraseHints. */ public int getPhraseHintsCount() { return phraseHints_.size(); } /** * * *
   * Optional. A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   *
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * 
* * repeated string phrase_hints = 4; * * @param index The index of the element to return. * @return The phraseHints at the given index. */ public java.lang.String getPhraseHints(int index) { return phraseHints_.get(index); } /** * * *
   * Optional. A list of strings containing words and phrases that the speech
   * recognizer should recognize with higher likelihood.
   *
   * See [the Cloud Speech
   * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
   * for more details.
   * 
* * repeated string phrase_hints = 4; * * @param index The index of the value to return. * @return The bytes of the phraseHints at the given index. */ public com.google.protobuf.ByteString getPhraseHintsBytes(int index) { return phraseHints_.getByteString(index); } public static final int MODEL_FIELD_NUMBER = 7; @SuppressWarnings("serial") private volatile java.lang.Object model_ = ""; /** * * *
   * Optional. Which Speech model to select for the given request.
   * For more information, see
   * [Speech
   * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
   * 
* * string model = 7; * * @return The model. */ @java.lang.Override public java.lang.String getModel() { java.lang.Object ref = model_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); model_ = s; return s; } } /** * * *
   * Optional. Which Speech model to select for the given request.
   * For more information, see
   * [Speech
   * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
   * 
* * string model = 7; * * @return The bytes for model. */ @java.lang.Override public com.google.protobuf.ByteString getModelBytes() { java.lang.Object ref = model_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); model_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int MODEL_VARIANT_FIELD_NUMBER = 10; private int modelVariant_ = 0; /** * * *
   * Optional. Which variant of the [Speech
   * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
   * 
* * .google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10; * * @return The enum numeric value on the wire for modelVariant. */ @java.lang.Override public int getModelVariantValue() { return modelVariant_; } /** * * *
   * Optional. Which variant of the [Speech
   * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
   * 
* * .google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10; * * @return The modelVariant. */ @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant getModelVariant() { com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant result = com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant.forNumber(modelVariant_); return result == null ? com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant.UNRECOGNIZED : result; } public static final int SINGLE_UTTERANCE_FIELD_NUMBER = 8; private boolean singleUtterance_ = false; /** * * *
   * Optional. If `false` (default), recognition does not cease until the
   * client closes the stream.
   * If `true`, the recognizer will detect a single spoken utterance in input
   * audio. Recognition ceases when it detects the audio's voice has
   * stopped or paused. In this case, once a detected intent is received, the
   * client should close the stream and start a new request with a new stream as
   * needed.
   * Note: This setting is relevant only for streaming methods.
   * 
* * bool single_utterance = 8; * * @return The singleUtterance. */ @java.lang.Override public boolean getSingleUtterance() { return singleUtterance_; } public static final int BARGE_IN_CONFIG_FIELD_NUMBER = 15; private com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig bargeInConfig_; /** * * *
   * Configuration of barge-in behavior during the streaming of input audio.
   * 
* * .google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15; * * @return Whether the bargeInConfig field is set. */ @java.lang.Override public boolean hasBargeInConfig() { return ((bitField0_ & 0x00000001) != 0); } /** * * *
   * Configuration of barge-in behavior during the streaming of input audio.
   * 
* * .google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15; * * @return The bargeInConfig. */ @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig getBargeInConfig() { return bargeInConfig_ == null ? com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.getDefaultInstance() : bargeInConfig_; } /** * * *
   * Configuration of barge-in behavior during the streaming of input audio.
   * 
* * .google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15; */ @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.BargeInConfigOrBuilder getBargeInConfigOrBuilder() { return bargeInConfig_ == null ? com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.getDefaultInstance() : bargeInConfig_; } public static final int OPT_OUT_CONFORMER_MODEL_MIGRATION_FIELD_NUMBER = 26; private boolean optOutConformerModelMigration_ = false; /** * * *
   * If `true`, the request will opt out for STT conformer model migration.
   * This field will be deprecated once force migration takes place in June
   * 2024. Please refer to [Dialogflow CX Speech model
   * migration](https://cloud.google.com/dialogflow/cx/docs/concept/speech-model-migration).
   * 
* * bool opt_out_conformer_model_migration = 26; * * @return The optOutConformerModelMigration. */ @java.lang.Override public boolean getOptOutConformerModelMigration() { return optOutConformerModelMigration_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (audioEncoding_ != com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED .getNumber()) { output.writeEnum(1, audioEncoding_); } if (sampleRateHertz_ != 0) { output.writeInt32(2, sampleRateHertz_); } for (int i = 0; i < phraseHints_.size(); i++) { com.google.protobuf.GeneratedMessageV3.writeString(output, 4, phraseHints_.getRaw(i)); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 7, model_); } if (singleUtterance_ != false) { output.writeBool(8, singleUtterance_); } if (modelVariant_ != com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant .SPEECH_MODEL_VARIANT_UNSPECIFIED .getNumber()) { output.writeEnum(10, modelVariant_); } if (enableWordInfo_ != false) { output.writeBool(13, enableWordInfo_); } if (((bitField0_ & 0x00000001) != 0)) { output.writeMessage(15, getBargeInConfig()); } if (optOutConformerModelMigration_ != false) { output.writeBool(26, optOutConformerModelMigration_); } getUnknownFields().writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (audioEncoding_ != com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding.AUDIO_ENCODING_UNSPECIFIED .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, audioEncoding_); } if (sampleRateHertz_ != 0) { size += 
com.google.protobuf.CodedOutputStream.computeInt32Size(2, sampleRateHertz_); } { int dataSize = 0; for (int i = 0; i < phraseHints_.size(); i++) { dataSize += computeStringSizeNoTag(phraseHints_.getRaw(i)); } size += dataSize; size += 1 * getPhraseHintsList().size(); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, model_); } if (singleUtterance_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, singleUtterance_); } if (modelVariant_ != com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant .SPEECH_MODEL_VARIANT_UNSPECIFIED .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, modelVariant_); } if (enableWordInfo_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, enableWordInfo_); } if (((bitField0_ & 0x00000001) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(15, getBargeInConfig()); } if (optOutConformerModelMigration_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(26, optOutConformerModelMigration_); } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig)) { return super.equals(obj); } com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig other = (com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) obj; if (audioEncoding_ != other.audioEncoding_) return false; if (getSampleRateHertz() != other.getSampleRateHertz()) return false; if (getEnableWordInfo() != other.getEnableWordInfo()) return false; if (!getPhraseHintsList().equals(other.getPhraseHintsList())) return false; if (!getModel().equals(other.getModel())) return false; if (modelVariant_ != other.modelVariant_) return false; if (getSingleUtterance() != 
other.getSingleUtterance()) return false; if (hasBargeInConfig() != other.hasBargeInConfig()) return false; if (hasBargeInConfig()) { if (!getBargeInConfig().equals(other.getBargeInConfig())) return false; } if (getOptOutConformerModelMigration() != other.getOptOutConformerModelMigration()) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + AUDIO_ENCODING_FIELD_NUMBER; hash = (53 * hash) + audioEncoding_; hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER; hash = (53 * hash) + getSampleRateHertz(); hash = (37 * hash) + ENABLE_WORD_INFO_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableWordInfo()); if (getPhraseHintsCount() > 0) { hash = (37 * hash) + PHRASE_HINTS_FIELD_NUMBER; hash = (53 * hash) + getPhraseHintsList().hashCode(); } hash = (37 * hash) + MODEL_FIELD_NUMBER; hash = (53 * hash) + getModel().hashCode(); hash = (37 * hash) + MODEL_VARIANT_FIELD_NUMBER; hash = (53 * hash) + modelVariant_; hash = (37 * hash) + SINGLE_UTTERANCE_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSingleUtterance()); if (hasBargeInConfig()) { hash = (37 * hash) + BARGE_IN_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getBargeInConfig().hashCode(); } hash = (37 * hash) + OPT_OUT_CONFORMER_MODEL_MIGRATION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getOptOutConformerModelMigration()); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( 
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig 
parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder( com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * *
   * Instructs the speech recognizer on how to process the audio content.
   * 
* * Protobuf type {@code google.cloud.dialogflow.cx.v3beta1.InputAudioConfig} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfigOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.dialogflow.cx.v3beta1.AudioConfigProto .internal_static_google_cloud_dialogflow_cx_v3beta1_InputAudioConfig_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.dialogflow.cx.v3beta1.AudioConfigProto .internal_static_google_cloud_dialogflow_cx_v3beta1_InputAudioConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.class, com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.Builder.class); } // Construct using com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getBargeInConfigFieldBuilder(); } } @java.lang.Override public Builder clear() { super.clear(); bitField0_ = 0; audioEncoding_ = 0; sampleRateHertz_ = 0; enableWordInfo_ = false; phraseHints_ = com.google.protobuf.LazyStringArrayList.emptyList(); model_ = ""; modelVariant_ = 0; singleUtterance_ = false; bargeInConfig_ = null; if (bargeInConfigBuilder_ != null) { bargeInConfigBuilder_.dispose(); bargeInConfigBuilder_ = null; } optOutConformerModelMigration_ = false; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
com.google.cloud.dialogflow.cx.v3beta1.AudioConfigProto .internal_static_google_cloud_dialogflow_cx_v3beta1_InputAudioConfig_descriptor; } @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig getDefaultInstanceForType() { return com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.getDefaultInstance(); } @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig build() { com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig buildPartial() { com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig result = new com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig(this); if (bitField0_ != 0) { buildPartial0(result); } onBuilt(); return result; } private void buildPartial0(com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig result) { int from_bitField0_ = bitField0_; if (((from_bitField0_ & 0x00000001) != 0)) { result.audioEncoding_ = audioEncoding_; } if (((from_bitField0_ & 0x00000002) != 0)) { result.sampleRateHertz_ = sampleRateHertz_; } if (((from_bitField0_ & 0x00000004) != 0)) { result.enableWordInfo_ = enableWordInfo_; } if (((from_bitField0_ & 0x00000008) != 0)) { phraseHints_.makeImmutable(); result.phraseHints_ = phraseHints_; } if (((from_bitField0_ & 0x00000010) != 0)) { result.model_ = model_; } if (((from_bitField0_ & 0x00000020) != 0)) { result.modelVariant_ = modelVariant_; } if (((from_bitField0_ & 0x00000040) != 0)) { result.singleUtterance_ = singleUtterance_; } int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000080) != 0)) { result.bargeInConfig_ = bargeInConfigBuilder_ == null ? 
bargeInConfig_ : bargeInConfigBuilder_.build(); to_bitField0_ |= 0x00000001; } if (((from_bitField0_ & 0x00000100) != 0)) { result.optOutConformerModelMigration_ = optOutConformerModelMigration_; } result.bitField0_ |= to_bitField0_; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) { return mergeFrom((com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig other) { if (other == com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.getDefaultInstance()) return this; if (other.audioEncoding_ != 0) { setAudioEncodingValue(other.getAudioEncodingValue()); } if (other.getSampleRateHertz() != 0) { setSampleRateHertz(other.getSampleRateHertz()); } if (other.getEnableWordInfo() != false) { setEnableWordInfo(other.getEnableWordInfo()); } if (!other.phraseHints_.isEmpty()) { if (phraseHints_.isEmpty()) { phraseHints_ = other.phraseHints_; bitField0_ |= 
0x00000008; } else { ensurePhraseHintsIsMutable(); phraseHints_.addAll(other.phraseHints_); } onChanged(); } if (!other.getModel().isEmpty()) { model_ = other.model_; bitField0_ |= 0x00000010; onChanged(); } if (other.modelVariant_ != 0) { setModelVariantValue(other.getModelVariantValue()); } if (other.getSingleUtterance() != false) { setSingleUtterance(other.getSingleUtterance()); } if (other.hasBargeInConfig()) { mergeBargeInConfig(other.getBargeInConfig()); } if (other.getOptOutConformerModelMigration() != false) { setOptOutConformerModelMigration(other.getOptOutConformerModelMigration()); } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { audioEncoding_ = input.readEnum(); bitField0_ |= 0x00000001; break; } // case 8 case 16: { sampleRateHertz_ = input.readInt32(); bitField0_ |= 0x00000002; break; } // case 16 case 34: { java.lang.String s = input.readStringRequireUtf8(); ensurePhraseHintsIsMutable(); phraseHints_.add(s); break; } // case 34 case 58: { model_ = input.readStringRequireUtf8(); bitField0_ |= 0x00000010; break; } // case 58 case 64: { singleUtterance_ = input.readBool(); bitField0_ |= 0x00000040; break; } // case 64 case 80: { modelVariant_ = input.readEnum(); bitField0_ |= 0x00000020; break; } // case 80 case 104: { enableWordInfo_ = input.readBool(); bitField0_ |= 0x00000004; break; } // case 104 case 122: { input.readMessage(getBargeInConfigFieldBuilder().getBuilder(), extensionRegistry); bitField0_ |= 0x00000080; break; } // case 122 case 208: { 
optOutConformerModelMigration_ = input.readBool(); bitField0_ |= 0x00000100; break; } // case 208 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { done = true; // was an endgroup tag } break; } // default: } // switch (tag) } // while (!done) } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.unwrapIOException(); } finally { onChanged(); } // finally return this; } private int bitField0_; private int audioEncoding_ = 0; /** * * *
     * Required. Audio encoding of the audio content to process.
     * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @return The enum numeric value on the wire for audioEncoding. */ @java.lang.Override public int getAudioEncodingValue() { return audioEncoding_; } /** * * *
     * Required. Audio encoding of the audio content to process.
     * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @param value The enum numeric value on the wire for audioEncoding to set. * @return This builder for chaining. */ public Builder setAudioEncodingValue(int value) { audioEncoding_ = value; bitField0_ |= 0x00000001; onChanged(); return this; } /** * * *
     * Required. Audio encoding of the audio content to process.
     * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @return The audioEncoding. */ @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding getAudioEncoding() { com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding result = com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding.forNumber(audioEncoding_); return result == null ? com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding.UNRECOGNIZED : result; } /** * * *
     * Required. Audio encoding of the audio content to process.
     * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @param value The audioEncoding to set. * @return This builder for chaining. */ public Builder setAudioEncoding(com.google.cloud.dialogflow.cx.v3beta1.AudioEncoding value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; audioEncoding_ = value.getNumber(); onChanged(); return this; } /** * * *
     * Required. Audio encoding of the audio content to process.
     * 
* * * .google.cloud.dialogflow.cx.v3beta1.AudioEncoding audio_encoding = 1 [(.google.api.field_behavior) = REQUIRED]; * * * @return This builder for chaining. */ public Builder clearAudioEncoding() { bitField0_ = (bitField0_ & ~0x00000001); audioEncoding_ = 0; onChanged(); return this; } private int sampleRateHertz_; /** * * *
      * Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @return The sampleRateHertz.
     */
    @java.lang.Override
    public int getSampleRateHertz() {
      return sampleRateHertz_;
    }

    /**
     *
     *
     * <pre>
     * Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @param value The sampleRateHertz to set.
     * @return This builder for chaining.
     */
    public Builder setSampleRateHertz(int value) {
      sampleRateHertz_ = value;
      bitField0_ |= 0x00000002;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Sample rate (in Hertz) of the audio content sent in the query.
     * Refer to
     * [Cloud Speech API
     * documentation](https://cloud.google.com/speech-to-text/docs/basics) for
     * more details.
     * </pre>
     *
     * <code>int32 sample_rate_hertz = 2;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSampleRateHertz() {
      bitField0_ = (bitField0_ & ~0x00000002);
      sampleRateHertz_ = 0;
      onChanged();
      return this;
    }

    private boolean enableWordInfo_;
    /**
     *
     *
     * <pre>
      * Optional. If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 13;</code>
     *
     * @return The enableWordInfo.
     */
    @java.lang.Override
    public boolean getEnableWordInfo() {
      return enableWordInfo_;
    }

    /**
     *
     *
     * <pre>
     * Optional. If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 13;</code>
     *
     * @param value The enableWordInfo to set.
     * @return This builder for chaining.
     */
    public Builder setEnableWordInfo(boolean value) {
      enableWordInfo_ = value;
      bitField0_ |= 0x00000004;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. If `true`, Dialogflow returns
     * [SpeechWordInfo][google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo] in
     * [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult]
     * with information about the recognized speech words, e.g. start and end time
     * offsets. If false or unspecified, Speech doesn't return any word-level
     * information.
     * </pre>
     *
     * <code>bool enable_word_info = 13;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearEnableWordInfo() {
      bitField0_ = (bitField0_ & ~0x00000004);
      enableWordInfo_ = false;
      onChanged();
      return this;
    }

    private com.google.protobuf.LazyStringArrayList phraseHints_ =
        com.google.protobuf.LazyStringArrayList.emptyList();

    // Copy-on-write guard: replaces the shared immutable list with a private
    // mutable copy before the first in-place modification.
    private void ensurePhraseHintsIsMutable() {
      if (!phraseHints_.isModifiable()) {
        phraseHints_ = new com.google.protobuf.LazyStringArrayList(phraseHints_);
      }
      bitField0_ |= 0x00000008;
    }
    /**
     *
     *
     * <pre>
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @return A list containing the phraseHints. */ public com.google.protobuf.ProtocolStringList getPhraseHintsList() { phraseHints_.makeImmutable(); return phraseHints_; } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @return The count of phraseHints. */ public int getPhraseHintsCount() { return phraseHints_.size(); } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @param index The index of the element to return. * @return The phraseHints at the given index. */ public java.lang.String getPhraseHints(int index) { return phraseHints_.get(index); } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @param index The index of the value to return. * @return The bytes of the phraseHints at the given index. */ public com.google.protobuf.ByteString getPhraseHintsBytes(int index) { return phraseHints_.getByteString(index); } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @param index The index to set the value at. * @param value The phraseHints to set. * @return This builder for chaining. */ public Builder setPhraseHints(int index, java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensurePhraseHintsIsMutable(); phraseHints_.set(index, value); bitField0_ |= 0x00000008; onChanged(); return this; } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @param value The phraseHints to add. * @return This builder for chaining. */ public Builder addPhraseHints(java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensurePhraseHintsIsMutable(); phraseHints_.add(value); bitField0_ |= 0x00000008; onChanged(); return this; } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @param values The phraseHints to add. * @return This builder for chaining. */ public Builder addAllPhraseHints(java.lang.Iterable values) { ensurePhraseHintsIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll(values, phraseHints_); bitField0_ |= 0x00000008; onChanged(); return this; } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @return This builder for chaining. */ public Builder clearPhraseHints() { phraseHints_ = com.google.protobuf.LazyStringArrayList.emptyList(); bitField0_ = (bitField0_ & ~0x00000008); ; onChanged(); return this; } /** * * *
     * Optional. A list of strings containing words and phrases that the speech
     * recognizer should recognize with higher likelihood.
     *
     * See [the Cloud Speech
     * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
     * for more details.
     * 
* * repeated string phrase_hints = 4; * * @param value The bytes of the phraseHints to add. * @return This builder for chaining. */ public Builder addPhraseHintsBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); ensurePhraseHintsIsMutable(); phraseHints_.add(value); bitField0_ |= 0x00000008; onChanged(); return this; } private java.lang.Object model_ = ""; /** * * *
      * Optional. Which Speech model to select for the given request.
     * For more information, see
     * [Speech
     * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @return The model.
     */
    public java.lang.String getModel() {
      java.lang.Object ref = model_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String so subsequent reads skip UTF-8 decoding.
        model_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Optional. Which Speech model to select for the given request.
     * For more information, see
     * [Speech
     * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @return The bytes for model.
     */
    public com.google.protobuf.ByteString getModelBytes() {
      java.lang.Object ref = model_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        // Cache the encoded ByteString for subsequent byte-level reads.
        model_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    /**
     *
     *
     * <pre>
     * Optional. Which Speech model to select for the given request.
     * For more information, see
     * [Speech
     * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @param value The model to set.
     * @return This builder for chaining.
     */
    public Builder setModel(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }
      model_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. Which Speech model to select for the given request.
     * For more information, see
     * [Speech
     * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModel() {
      model_ = getDefaultInstance().getModel();
      bitField0_ = (bitField0_ & ~0x00000010);
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. Which Speech model to select for the given request.
     * For more information, see
     * [Speech
     * models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
     * </pre>
     *
     * <code>string model = 7;</code>
     *
     * @param value The bytes for model to set.
     * @return This builder for chaining.
     */
    public Builder setModelBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);
      model_ = value;
      bitField0_ |= 0x00000010;
      onChanged();
      return this;
    }

    private int modelVariant_ = 0;
    /**
     *
     *
     * <pre>
      * Optional. Which variant of the [Speech
     * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10;</code>
     *
     * @return The enum numeric value on the wire for modelVariant.
     */
    @java.lang.Override
    public int getModelVariantValue() {
      return modelVariant_;
    }

    /**
     *
     *
     * <pre>
     * Optional. Which variant of the [Speech
     * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10;</code>
     *
     * @param value The enum numeric value on the wire for modelVariant to set.
     * @return This builder for chaining.
     */
    public Builder setModelVariantValue(int value) {
      modelVariant_ = value;
      bitField0_ |= 0x00000020;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. Which variant of the [Speech
     * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10;</code>
     *
     * @return The modelVariant.
     */
    @java.lang.Override
    public com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant getModelVariant() {
      com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant result =
          com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant.forNumber(modelVariant_);
      return result == null
          ? com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant.UNRECOGNIZED
          : result;
    }

    /**
     *
     *
     * <pre>
     * Optional. Which variant of the [Speech
     * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10;</code>
     *
     * @param value The modelVariant to set.
     * @return This builder for chaining.
     */
    public Builder setModelVariant(
        com.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant value) {
      if (value == null) {
        throw new NullPointerException();
      }
      bitField0_ |= 0x00000020;
      modelVariant_ = value.getNumber();
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. Which variant of the [Speech
     * model][google.cloud.dialogflow.cx.v3beta1.InputAudioConfig.model] to use.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.SpeechModelVariant model_variant = 10;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearModelVariant() {
      bitField0_ = (bitField0_ & ~0x00000020);
      modelVariant_ = 0;
      onChanged();
      return this;
    }

    private boolean singleUtterance_;
    /**
     *
     *
     * <pre>
      * Optional. If `false` (default), recognition does not cease until the
     * client closes the stream.
     * If `true`, the recognizer will detect a single spoken utterance in input
     * audio. Recognition ceases when it detects the audio's voice has
     * stopped or paused. In this case, once a detected intent is received, the
     * client should close the stream and start a new request with a new stream as
     * needed.
     * Note: This setting is relevant only for streaming methods.
     * </pre>
     *
     * <code>bool single_utterance = 8;</code>
     *
     * @return The singleUtterance.
     */
    @java.lang.Override
    public boolean getSingleUtterance() {
      return singleUtterance_;
    }

    /**
     *
     *
     * <pre>
     * Optional. If `false` (default), recognition does not cease until the
     * client closes the stream.
     * If `true`, the recognizer will detect a single spoken utterance in input
     * audio. Recognition ceases when it detects the audio's voice has
     * stopped or paused. In this case, once a detected intent is received, the
     * client should close the stream and start a new request with a new stream as
     * needed.
     * Note: This setting is relevant only for streaming methods.
     * </pre>
     *
     * <code>bool single_utterance = 8;</code>
     *
     * @param value The singleUtterance to set.
     * @return This builder for chaining.
     */
    public Builder setSingleUtterance(boolean value) {
      singleUtterance_ = value;
      bitField0_ |= 0x00000040;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Optional. If `false` (default), recognition does not cease until the
     * client closes the stream.
     * If `true`, the recognizer will detect a single spoken utterance in input
     * audio. Recognition ceases when it detects the audio's voice has
     * stopped or paused. In this case, once a detected intent is received, the
     * client should close the stream and start a new request with a new stream as
     * needed.
     * Note: This setting is relevant only for streaming methods.
     * </pre>
     *
     * <code>bool single_utterance = 8;</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearSingleUtterance() {
      bitField0_ = (bitField0_ & ~0x00000040);
      singleUtterance_ = false;
      onChanged();
      return this;
    }

    private com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig bargeInConfig_;
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig,
            com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.Builder,
            com.google.cloud.dialogflow.cx.v3beta1.BargeInConfigOrBuilder>
        bargeInConfigBuilder_;
    /**
     *
     *
     * <pre>
      * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     *
     * @return Whether the bargeInConfig field is set.
     */
    public boolean hasBargeInConfig() {
      return ((bitField0_ & 0x00000080) != 0);
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     *
     * @return The bargeInConfig.
     */
    public com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig getBargeInConfig() {
      if (bargeInConfigBuilder_ == null) {
        return bargeInConfig_ == null
            ? com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.getDefaultInstance()
            : bargeInConfig_;
      } else {
        return bargeInConfigBuilder_.getMessage();
      }
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder setBargeInConfig(com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig value) {
      if (bargeInConfigBuilder_ == null) {
        if (value == null) {
          throw new NullPointerException();
        }
        bargeInConfig_ = value;
      } else {
        bargeInConfigBuilder_.setMessage(value);
      }
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder setBargeInConfig(
        com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.Builder builderForValue) {
      if (bargeInConfigBuilder_ == null) {
        bargeInConfig_ = builderForValue.build();
      } else {
        bargeInConfigBuilder_.setMessage(builderForValue.build());
      }
      bitField0_ |= 0x00000080;
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder mergeBargeInConfig(com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig value) {
      if (bargeInConfigBuilder_ == null) {
        // Merge into the existing message only when one was already set and is
        // not the default instance; otherwise just take the new value.
        if (((bitField0_ & 0x00000080) != 0)
            && bargeInConfig_ != null
            && bargeInConfig_
                != com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.getDefaultInstance()) {
          getBargeInConfigBuilder().mergeFrom(value);
        } else {
          bargeInConfig_ = value;
        }
      } else {
        bargeInConfigBuilder_.mergeFrom(value);
      }
      if (bargeInConfig_ != null) {
        bitField0_ |= 0x00000080;
        onChanged();
      }
      return this;
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public Builder clearBargeInConfig() {
      bitField0_ = (bitField0_ & ~0x00000080);
      bargeInConfig_ = null;
      if (bargeInConfigBuilder_ != null) {
        bargeInConfigBuilder_.dispose();
        bargeInConfigBuilder_ = null;
      }
      onChanged();
      return this;
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.Builder getBargeInConfigBuilder() {
      bitField0_ |= 0x00000080;
      onChanged();
      return getBargeInConfigFieldBuilder().getBuilder();
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    public com.google.cloud.dialogflow.cx.v3beta1.BargeInConfigOrBuilder
        getBargeInConfigOrBuilder() {
      if (bargeInConfigBuilder_ != null) {
        return bargeInConfigBuilder_.getMessageOrBuilder();
      } else {
        return bargeInConfig_ == null
            ? com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.getDefaultInstance()
            : bargeInConfig_;
      }
    }

    /**
     *
     *
     * <pre>
     * Configuration of barge-in behavior during the streaming of input audio.
     * </pre>
     *
     * <code>.google.cloud.dialogflow.cx.v3beta1.BargeInConfig barge_in_config = 15;</code>
     */
    private com.google.protobuf.SingleFieldBuilderV3<
            com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig,
            com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.Builder,
            com.google.cloud.dialogflow.cx.v3beta1.BargeInConfigOrBuilder>
        getBargeInConfigFieldBuilder() {
      if (bargeInConfigBuilder_ == null) {
        bargeInConfigBuilder_ =
            new com.google.protobuf.SingleFieldBuilderV3<
                com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig,
                com.google.cloud.dialogflow.cx.v3beta1.BargeInConfig.Builder,
                com.google.cloud.dialogflow.cx.v3beta1.BargeInConfigOrBuilder>(
                getBargeInConfig(), getParentForChildren(), isClean());
        bargeInConfig_ = null;
      }
      return bargeInConfigBuilder_;
    }

    private boolean optOutConformerModelMigration_;
    /**
     *
     *
     * <pre>
     * If `true`, the request will opt out for STT conformer model migration.
     * This field will be deprecated once force migration takes place in June
     * 2024. Please refer to [Dialogflow CX Speech model
     * migration](https://cloud.google.com/dialogflow/cx/docs/concept/speech-model-migration).
     * 
* * bool opt_out_conformer_model_migration = 26; * * @return The optOutConformerModelMigration. */ @java.lang.Override public boolean getOptOutConformerModelMigration() { return optOutConformerModelMigration_; } /** * * *
     * If `true`, the request will opt out for STT conformer model migration.
     * This field will be deprecated once force migration takes place in June
     * 2024. Please refer to [Dialogflow CX Speech model
     * migration](https://cloud.google.com/dialogflow/cx/docs/concept/speech-model-migration).
     * 
* * bool opt_out_conformer_model_migration = 26; * * @param value The optOutConformerModelMigration to set. * @return This builder for chaining. */ public Builder setOptOutConformerModelMigration(boolean value) { optOutConformerModelMigration_ = value; bitField0_ |= 0x00000100; onChanged(); return this; } /** * * *
     * If `true`, the request will opt out for STT conformer model migration.
     * This field will be deprecated once force migration takes place in June
     * 2024. Please refer to [Dialogflow CX Speech model
     * migration](https://cloud.google.com/dialogflow/cx/docs/concept/speech-model-migration).
     * 
* * bool opt_out_conformer_model_migration = 26; * * @return This builder for chaining. */ public Builder clearOptOutConformerModelMigration() { bitField0_ = (bitField0_ & ~0x00000100); optOutConformerModelMigration_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) } // @@protoc_insertion_point(class_scope:google.cloud.dialogflow.cx.v3beta1.InputAudioConfig) private static final com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig(); } public static com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { @java.lang.Override public InputAudioConfig parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { Builder builder = newBuilder(); try { builder.mergeFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(builder.buildPartial()); } catch (com.google.protobuf.UninitializedMessageException e) { throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e) .setUnfinishedMessage(builder.buildPartial()); } return builder.buildPartial(); } }; public static com.google.protobuf.Parser 
parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.dialogflow.cx.v3beta1.InputAudioConfig getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }




© 2015 - 2024 Weber Informatics LLC | Privacy Policy