com.google.cloud.speech.v1.RecognitionConfig
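For orientation before the generated source, here is a minimal usage sketch of how this message is normally assembled through its generated Builder. The field values (LINEAR16, 16000 Hz, "en-US", etc.) are illustrative only, and SpeechContext.newBuilder().addPhrases(...) is assumed from the companion SpeechContext message, which is not defined in this file.

import com.google.cloud.speech.v1.RecognitionConfig;
import com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding;
import com.google.cloud.speech.v1.SpeechContext;

public class RecognitionConfigExample {
  public static void main(String[] args) {
    // Illustrative values; see the field comments in the source below for the
    // documented ranges (e.g. sample_rate_hertz: 8000-48000, 16000 optimal).
    RecognitionConfig config =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.LINEAR16)     // lossless PCM, recommended
            .setSampleRateHertz(16000)               // must match the audio source
            .setLanguageCode("en-US")                // BCP-47 language tag
            .setMaxAlternatives(3)                   // up to 3 hypotheses per result
            .setProfanityFilter(true)                // mask profanities with asterisks
            .addSpeechContexts(
                SpeechContext.newBuilder()
                    .addPhrases("RecognitionConfig") // assumed field of SpeechContext
                    .build())
            .build();

    System.out.println(config);
  }
}
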
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/speech/v1/cloud_speech.proto

package com.google.cloud.speech.v1;

/**
 * Provides information to the recognizer that specifies how to process the
 * request.
 *
 * Protobuf type {@code google.cloud.speech.v1.RecognitionConfig}
 */
public final class RecognitionConfig extends
    com.google.protobuf.GeneratedMessageV3 implements
    // @@protoc_insertion_point(message_implements:google.cloud.speech.v1.RecognitionConfig)
    RecognitionConfigOrBuilder {
  // Use RecognitionConfig.newBuilder() to construct.
  private RecognitionConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }
  private RecognitionConfig() {
    encoding_ = 0;
    sampleRateHertz_ = 0;
    languageCode_ = "";
    maxAlternatives_ = 0;
    profanityFilter_ = false;
    speechContexts_ = java.util.Collections.emptyList();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet
  getUnknownFields() {
    return com.google.protobuf.UnknownFieldSet.getDefaultInstance();
  }
  private RecognitionConfig(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    this();
    int mutable_bitField0_ = 0;
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          default: {
            if (!input.skipField(tag)) {
              done = true;
            }
            break;
          }
          case 8: {
            int rawValue = input.readEnum();
            encoding_ = rawValue;
            break;
          }
          case 16: {
            sampleRateHertz_ = input.readInt32();
            break;
          }
          case 26: {
            java.lang.String s = input.readStringRequireUtf8();
            languageCode_ = s;
            break;
          }
          case 32: {
            maxAlternatives_ = input.readInt32();
            break;
          }
          case 40: {
            profanityFilter_ = input.readBool();
            break;
          }
          case 50: {
            if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
              speechContexts_ = new java.util.ArrayList<com.google.cloud.speech.v1.SpeechContext>();
              mutable_bitField0_ |= 0x00000020;
            }
            speechContexts_.add(
                input.readMessage(com.google.cloud.speech.v1.SpeechContext.parser(), extensionRegistry));
            break;
          }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(
          e).setUnfinishedMessage(this);
    } finally {
      if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
        speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
      }
      makeExtensionsImmutable();
    }
  }
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return com.google.cloud.speech.v1.SpeechProto.internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor;
  }

  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.speech.v1.SpeechProto.internal_static_google_cloud_speech_v1_RecognitionConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.speech.v1.RecognitionConfig.class, com.google.cloud.speech.v1.RecognitionConfig.Builder.class);
  }

  /**
   * Audio encoding of the data sent in the audio message. All encodings support
   * only 1 channel (mono) audio. Only `FLAC` includes a header that describes
   * the bytes of audio that follow the header. The other encodings are raw
   * audio bytes with no header.
   * For best results, the audio source should be captured and transmitted using
   * a lossless encoding (`FLAC` or `LINEAR16`). Recognition accuracy may be
   * reduced if lossy codecs, which include the other codecs listed in
   * this section, are used to capture or transmit the audio, particularly if
   * background noise is present.
   * 
   *
   * Protobuf enum {@code google.cloud.speech.v1.RecognitionConfig.AudioEncoding}
   */
  public enum AudioEncoding
      implements com.google.protobuf.ProtocolMessageEnum {
    /**
     * Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
     * 
     *
     * ENCODING_UNSPECIFIED = 0;
     */
    ENCODING_UNSPECIFIED(0),
    /**
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * 
     *
     * LINEAR16 = 1;
     */
    LINEAR16(1),
    /**
     * [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples, however, not all fields in
     * `STREAMINFO` are supported.
     * 
     *
     * FLAC = 2;
     */
    FLAC(2),
    /**
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * 
     *
     * MULAW = 3;
     */
    MULAW(3),
    /**
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * 
     *
     * AMR = 4;
     */
    AMR(4),
    /**
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * 
     *
     * AMR_WB = 5;
     */
    AMR_WB(5),
    /**
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be 16000.
     * 
     *
     * OGG_OPUS = 6;
     */
    OGG_OPUS(6),
    /**
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/)  encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * 
     *
     * SPEEX_WITH_HEADER_BYTE = 7;
     */
    SPEEX_WITH_HEADER_BYTE(7),
    UNRECOGNIZED(-1),
    ;

    /**
     * Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
     * 
     *
     * ENCODING_UNSPECIFIED = 0;
     */
    public static final int ENCODING_UNSPECIFIED_VALUE = 0;
    /**
     * Uncompressed 16-bit signed little-endian samples (Linear PCM).
     * 
     *
     * LINEAR16 = 1;
     */
    public static final int LINEAR16_VALUE = 1;
    /**
     * [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
     * Codec) is the recommended encoding because it is
     * lossless--therefore recognition is not compromised--and
     * requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
     * encoding supports 16-bit and 24-bit samples, however, not all fields in
     * `STREAMINFO` are supported.
     * 
     *
     * FLAC = 2;
     */
    public static final int FLAC_VALUE = 2;
    /**
     * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
     * 
     *
     * MULAW = 3;
     */
    public static final int MULAW_VALUE = 3;
    /**
     * Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
     * 
     *
     * AMR = 4;
     */
    public static final int AMR_VALUE = 4;
    /**
     * Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
     * 
     *
     * AMR_WB = 5;
     */
    public static final int AMR_WB_VALUE = 5;
    /**
     * Opus encoded audio frames in Ogg container
     * ([OggOpus](https://wiki.xiph.org/OggOpus)).
     * `sample_rate_hertz` must be 16000.
     * 
     *
     * OGG_OPUS = 6;
     */
    public static final int OGG_OPUS_VALUE = 6;
    /**
     * Although the use of lossy encodings is not recommended, if a very low
     * bitrate encoding is required, `OGG_OPUS` is highly preferred over
     * Speex encoding. The [Speex](https://speex.org/)  encoding supported by
     * Cloud Speech API has a header byte in each block, as in MIME type
     * `audio/x-speex-with-header-byte`.
     * It is a variant of the RTP Speex encoding defined in
     * [RFC 5574](https://tools.ietf.org/html/rfc5574).
     * The stream is a sequence of blocks, one block per RTP packet. Each block
     * starts with a byte containing the length of the block, in bytes, followed
     * by one or more frames of Speex data, padded to an integral number of
     * bytes (octets) as specified in RFC 5574. In other words, each RTP header
     * is replaced with a single byte containing the block length. Only Speex
     * wideband is supported. `sample_rate_hertz` must be 16000.
     * 
     *
     * SPEEX_WITH_HEADER_BYTE = 7;
     */
    public static final int SPEEX_WITH_HEADER_BYTE_VALUE = 7;

    public final int getNumber() {
      if (this == UNRECOGNIZED) {
        throw new java.lang.IllegalArgumentException(
            "Can't get the number of an unknown enum value.");
      }
      return value;
    }

    /**
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static AudioEncoding valueOf(int value) {
      return forNumber(value);
    }

    public static AudioEncoding forNumber(int value) {
      switch (value) {
        case 0: return ENCODING_UNSPECIFIED;
        case 1: return LINEAR16;
        case 2: return FLAC;
        case 3: return MULAW;
        case 4: return AMR;
        case 5: return AMR_WB;
        case 6: return OGG_OPUS;
        case 7: return SPEEX_WITH_HEADER_BYTE;
        default: return null;
      }
    }

    public static com.google.protobuf.Internal.EnumLiteMap<AudioEncoding>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final com.google.protobuf.Internal.EnumLiteMap<
        AudioEncoding> internalValueMap =
          new com.google.protobuf.Internal.EnumLiteMap<AudioEncoding>() {
            public AudioEncoding findValueByNumber(int number) {
              return AudioEncoding.forNumber(number);
            }
          };

    public final com.google.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final com.google.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final com.google.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return com.google.cloud.speech.v1.RecognitionConfig.getDescriptor().getEnumTypes().get(0);
    }

    private static final AudioEncoding[] VALUES = values();

    public static AudioEncoding valueOf(
        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
      }
      if (desc.getIndex() == -1) {
        return UNRECOGNIZED;
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private AudioEncoding(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:google.cloud.speech.v1.RecognitionConfig.AudioEncoding)
  }

  private int bitField0_;
  public static final int ENCODING_FIELD_NUMBER = 1;
  private int encoding_;
  /**
   * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
   * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public int getEncodingValue() { return encoding_; } /** *
   * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
   * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() { com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding result = com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.valueOf(encoding_); return result == null ? com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.UNRECOGNIZED : result; } public static final int SAMPLE_RATE_HERTZ_FIELD_NUMBER = 2; private int sampleRateHertz_; /** *
   * *Required* Sample rate in Hertz of the audio data sent in all
   * `RecognitionAudio` messages. Valid values are: 8000-48000.
   * 16000 is optimal. For best results, set the sampling rate of the audio
   * source to 16000 Hz. If that's not possible, use the native sample rate of
   * the audio source (instead of re-sampling).
   * 
* * int32 sample_rate_hertz = 2; */ public int getSampleRateHertz() { return sampleRateHertz_; } public static final int LANGUAGE_CODE_FIELD_NUMBER = 3; private volatile java.lang.Object languageCode_; /** *
   * *Required* The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language Support](https://cloud.google.com/speech/docs/languages)
   * for a list of the currently supported language codes.
   * 
* * string language_code = 3; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); languageCode_ = s; return s; } } /** *
   * *Required* The language of the supplied audio as a
   * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
   * Example: "en-US".
   * See [Language Support](https://cloud.google.com/speech/docs/languages)
   * for a list of the currently supported language codes.
   * 
* * string language_code = 3; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); languageCode_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int MAX_ALTERNATIVES_FIELD_NUMBER = 4; private int maxAlternatives_; /** *
   * *Optional* Maximum number of recognition hypotheses to be returned.
   * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
   * within each `SpeechRecognitionResult`.
   * The server may return fewer than `max_alternatives`.
   * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
   * one. If omitted, will return a maximum of one.
   * 
* * int32 max_alternatives = 4; */ public int getMaxAlternatives() { return maxAlternatives_; } public static final int PROFANITY_FILTER_FIELD_NUMBER = 5; private boolean profanityFilter_; /** *
   * *Optional* If set to `true`, the server will attempt to filter out
   * profanities, replacing all but the initial character in each filtered word
   * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
   * won't be filtered out.
   * 
* * bool profanity_filter = 5; */ public boolean getProfanityFilter() { return profanityFilter_; } public static final int SPEECH_CONTEXTS_FIELD_NUMBER = 6; private java.util.List speechContexts_; /** *
   * *Optional* A means to provide context to assist the speech recognition.
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsList() { return speechContexts_; } /** *
   * *Optional* A means to provide context to assist the speech recognition.
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsOrBuilderList() { return speechContexts_; } /** *
   * *Optional* A means to provide context to assist the speech recognition.
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public int getSpeechContextsCount() { return speechContexts_.size(); } /** *
   * *Optional* A means to provide context to assist the speech recognition.
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) { return speechContexts_.get(index); } /** *
   * *Optional* A means to provide context to assist the speech recognition.
   * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder( int index) { return speechContexts_.get(index); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (encoding_ != com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED.getNumber()) { output.writeEnum(1, encoding_); } if (sampleRateHertz_ != 0) { output.writeInt32(2, sampleRateHertz_); } if (!getLanguageCodeBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, languageCode_); } if (maxAlternatives_ != 0) { output.writeInt32(4, maxAlternatives_); } if (profanityFilter_ != false) { output.writeBool(5, profanityFilter_); } for (int i = 0; i < speechContexts_.size(); i++) { output.writeMessage(6, speechContexts_.get(i)); } } public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (encoding_ != com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED.getNumber()) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(1, encoding_); } if (sampleRateHertz_ != 0) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, sampleRateHertz_); } if (!getLanguageCodeBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, languageCode_); } if (maxAlternatives_ != 0) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(4, maxAlternatives_); } if (profanityFilter_ != false) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(5, profanityFilter_); } for (int i = 0; i < speechContexts_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(6, speechContexts_.get(i)); } memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.speech.v1.RecognitionConfig)) { return super.equals(obj); } com.google.cloud.speech.v1.RecognitionConfig other = (com.google.cloud.speech.v1.RecognitionConfig) obj; boolean result = true; result = result && encoding_ == other.encoding_; result = result && (getSampleRateHertz() == other.getSampleRateHertz()); result = result && getLanguageCode() .equals(other.getLanguageCode()); result = result && (getMaxAlternatives() == other.getMaxAlternatives()); result = result && (getProfanityFilter() == other.getProfanityFilter()); result = result && getSpeechContextsList() .equals(other.getSpeechContextsList()); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + ENCODING_FIELD_NUMBER; hash = (53 * hash) + encoding_; hash = (37 * hash) + SAMPLE_RATE_HERTZ_FIELD_NUMBER; hash = (53 * hash) + getSampleRateHertz(); hash = (37 * hash) + LANGUAGE_CODE_FIELD_NUMBER; hash = (53 * hash) + getLanguageCode().hashCode(); hash = (37 * hash) + MAX_ALTERNATIVES_FIELD_NUMBER; hash = (53 * hash) + getMaxAlternatives(); hash = (37 * hash) + PROFANITY_FILTER_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( 
getProfanityFilter()); if (getSpeechContextsCount() > 0) { hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER; hash = (53 * hash) + getSpeechContextsList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static com.google.cloud.speech.v1.RecognitionConfig parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.speech.v1.RecognitionConfig parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static com.google.cloud.speech.v1.RecognitionConfig parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(com.google.cloud.speech.v1.RecognitionConfig prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** *
   * Provides information to the recognizer that specifies how to process the
   * request.
   * 
* * Protobuf type {@code google.cloud.speech.v1.RecognitionConfig} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.speech.v1.RecognitionConfig) com.google.cloud.speech.v1.RecognitionConfigOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.speech.v1.SpeechProto.internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.speech.v1.SpeechProto.internal_static_google_cloud_speech_v1_RecognitionConfig_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.speech.v1.RecognitionConfig.class, com.google.cloud.speech.v1.RecognitionConfig.Builder.class); } // Construct using com.google.cloud.speech.v1.RecognitionConfig.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getSpeechContextsFieldBuilder(); } } public Builder clear() { super.clear(); encoding_ = 0; sampleRateHertz_ = 0; languageCode_ = ""; maxAlternatives_ = 0; profanityFilter_ = false; if (speechContextsBuilder_ == null) { speechContexts_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); } else { speechContextsBuilder_.clear(); } return this; } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.speech.v1.SpeechProto.internal_static_google_cloud_speech_v1_RecognitionConfig_descriptor; } public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() { return com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance(); } public com.google.cloud.speech.v1.RecognitionConfig build() { com.google.cloud.speech.v1.RecognitionConfig result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public com.google.cloud.speech.v1.RecognitionConfig buildPartial() { com.google.cloud.speech.v1.RecognitionConfig result = new com.google.cloud.speech.v1.RecognitionConfig(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; result.encoding_ = encoding_; result.sampleRateHertz_ = sampleRateHertz_; result.languageCode_ = languageCode_; result.maxAlternatives_ = maxAlternatives_; result.profanityFilter_ = profanityFilter_; if (speechContextsBuilder_ == null) { if (((bitField0_ & 0x00000020) == 0x00000020)) { speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_); bitField0_ = (bitField0_ & ~0x00000020); } result.speechContexts_ = speechContexts_; } else { result.speechContexts_ = speechContextsBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( 
com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.speech.v1.RecognitionConfig) { return mergeFrom((com.google.cloud.speech.v1.RecognitionConfig)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.speech.v1.RecognitionConfig other) { if (other == com.google.cloud.speech.v1.RecognitionConfig.getDefaultInstance()) return this; if (other.encoding_ != 0) { setEncodingValue(other.getEncodingValue()); } if (other.getSampleRateHertz() != 0) { setSampleRateHertz(other.getSampleRateHertz()); } if (!other.getLanguageCode().isEmpty()) { languageCode_ = other.languageCode_; onChanged(); } if (other.getMaxAlternatives() != 0) { setMaxAlternatives(other.getMaxAlternatives()); } if (other.getProfanityFilter() != false) { setProfanityFilter(other.getProfanityFilter()); } if (speechContextsBuilder_ == null) { if (!other.speechContexts_.isEmpty()) { if (speechContexts_.isEmpty()) { speechContexts_ = other.speechContexts_; bitField0_ = (bitField0_ & ~0x00000020); } else { ensureSpeechContextsIsMutable(); speechContexts_.addAll(other.speechContexts_); } onChanged(); } } else { if (!other.speechContexts_.isEmpty()) { if (speechContextsBuilder_.isEmpty()) { speechContextsBuilder_.dispose(); speechContextsBuilder_ = null; speechContexts_ = other.speechContexts_; bitField0_ = (bitField0_ & ~0x00000020); speechContextsBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? getSpeechContextsFieldBuilder() : null; } else { speechContextsBuilder_.addAllMessages(other.speechContexts_); } } } onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.speech.v1.RecognitionConfig parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.speech.v1.RecognitionConfig) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private int encoding_ = 0; /** *
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public int getEncodingValue() { return encoding_; } /** *
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public Builder setEncodingValue(int value) { encoding_ = value; onChanged(); return this; } /** *
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding getEncoding() { com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding result = com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.valueOf(encoding_); return result == null ? com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding.UNRECOGNIZED : result; } /** *
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public Builder setEncoding(com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding value) { if (value == null) { throw new NullPointerException(); } encoding_ = value.getNumber(); onChanged(); return this; } /** *
     * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
     * 
* * .google.cloud.speech.v1.RecognitionConfig.AudioEncoding encoding = 1; */ public Builder clearEncoding() { encoding_ = 0; onChanged(); return this; } private int sampleRateHertz_ ; /** *
     * *Required* Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * 
* * int32 sample_rate_hertz = 2; */ public int getSampleRateHertz() { return sampleRateHertz_; } /** *
     * *Required* Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * 
* * int32 sample_rate_hertz = 2; */ public Builder setSampleRateHertz(int value) { sampleRateHertz_ = value; onChanged(); return this; } /** *
     * *Required* Sample rate in Hertz of the audio data sent in all
     * `RecognitionAudio` messages. Valid values are: 8000-48000.
     * 16000 is optimal. For best results, set the sampling rate of the audio
     * source to 16000 Hz. If that's not possible, use the native sample rate of
     * the audio source (instead of re-sampling).
     * 
* * int32 sample_rate_hertz = 2; */ public Builder clearSampleRateHertz() { sampleRateHertz_ = 0; onChanged(); return this; } private java.lang.Object languageCode_ = ""; /** *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public java.lang.String getLanguageCode() { java.lang.Object ref = languageCode_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); languageCode_ = s; return s; } else { return (java.lang.String) ref; } } /** *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public com.google.protobuf.ByteString getLanguageCodeBytes() { java.lang.Object ref = languageCode_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); languageCode_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public Builder setLanguageCode( java.lang.String value) { if (value == null) { throw new NullPointerException(); } languageCode_ = value; onChanged(); return this; } /** *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public Builder clearLanguageCode() { languageCode_ = getDefaultInstance().getLanguageCode(); onChanged(); return this; } /** *
     * *Required* The language of the supplied audio as a
     * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
     * Example: "en-US".
     * See [Language Support](https://cloud.google.com/speech/docs/languages)
     * for a list of the currently supported language codes.
     * 
* * string language_code = 3; */ public Builder setLanguageCodeBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); languageCode_ = value; onChanged(); return this; } private int maxAlternatives_ ; /** *
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, will return a maximum of one.
     * 
* * int32 max_alternatives = 4; */ public int getMaxAlternatives() { return maxAlternatives_; } /** *
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, will return a maximum of one.
     * 
* * int32 max_alternatives = 4; */ public Builder setMaxAlternatives(int value) { maxAlternatives_ = value; onChanged(); return this; } /** *
     * *Optional* Maximum number of recognition hypotheses to be returned.
     * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
     * within each `SpeechRecognitionResult`.
     * The server may return fewer than `max_alternatives`.
     * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
     * one. If omitted, will return a maximum of one.
     * 
* * int32 max_alternatives = 4; */ public Builder clearMaxAlternatives() { maxAlternatives_ = 0; onChanged(); return this; } private boolean profanityFilter_ ; /** *
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
* * bool profanity_filter = 5; */ public boolean getProfanityFilter() { return profanityFilter_; } /** *
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
* * bool profanity_filter = 5; */ public Builder setProfanityFilter(boolean value) { profanityFilter_ = value; onChanged(); return this; } /** *
     * *Optional* If set to `true`, the server will attempt to filter out
     * profanities, replacing all but the initial character in each filtered word
     * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
     * won't be filtered out.
     * 
* * bool profanity_filter = 5; */ public Builder clearProfanityFilter() { profanityFilter_ = false; onChanged(); return this; } private java.util.List speechContexts_ = java.util.Collections.emptyList(); private void ensureSpeechContextsIsMutable() { if (!((bitField0_ & 0x00000020) == 0x00000020)) { speechContexts_ = new java.util.ArrayList(speechContexts_); bitField0_ |= 0x00000020; } } private com.google.protobuf.RepeatedFieldBuilderV3< com.google.cloud.speech.v1.SpeechContext, com.google.cloud.speech.v1.SpeechContext.Builder, com.google.cloud.speech.v1.SpeechContextOrBuilder> speechContextsBuilder_; /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsList() { if (speechContextsBuilder_ == null) { return java.util.Collections.unmodifiableList(speechContexts_); } else { return speechContextsBuilder_.getMessageList(); } } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public int getSpeechContextsCount() { if (speechContextsBuilder_ == null) { return speechContexts_.size(); } else { return speechContextsBuilder_.getCount(); } } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContext getSpeechContexts(int index) { if (speechContextsBuilder_ == null) { return speechContexts_.get(index); } else { return speechContextsBuilder_.getMessage(index); } } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder setSpeechContexts( int index, com.google.cloud.speech.v1.SpeechContext value) { if (speechContextsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSpeechContextsIsMutable(); speechContexts_.set(index, value); onChanged(); } else { speechContextsBuilder_.setMessage(index, value); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder setSpeechContexts( int index, com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.set(index, builderForValue.build()); onChanged(); } else { speechContextsBuilder_.setMessage(index, builderForValue.build()); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts(com.google.cloud.speech.v1.SpeechContext value) { if (speechContextsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSpeechContextsIsMutable(); speechContexts_.add(value); onChanged(); } else { speechContextsBuilder_.addMessage(value); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts( int index, com.google.cloud.speech.v1.SpeechContext value) { if (speechContextsBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureSpeechContextsIsMutable(); speechContexts_.add(index, value); onChanged(); } else { speechContextsBuilder_.addMessage(index, value); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts( com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.add(builderForValue.build()); onChanged(); } else { speechContextsBuilder_.addMessage(builderForValue.build()); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder addSpeechContexts( int index, com.google.cloud.speech.v1.SpeechContext.Builder builderForValue) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.add(index, builderForValue.build()); onChanged(); } else { speechContextsBuilder_.addMessage(index, builderForValue.build()); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder addAllSpeechContexts( java.lang.Iterable values) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, speechContexts_); onChanged(); } else { speechContextsBuilder_.addAllMessages(values); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder clearSpeechContexts() { if (speechContextsBuilder_ == null) { speechContexts_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000020); onChanged(); } else { speechContextsBuilder_.clear(); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public Builder removeSpeechContexts(int index) { if (speechContextsBuilder_ == null) { ensureSpeechContextsIsMutable(); speechContexts_.remove(index); onChanged(); } else { speechContextsBuilder_.remove(index); } return this; } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContext.Builder getSpeechContextsBuilder( int index) { return getSpeechContextsFieldBuilder().getBuilder(index); } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContextOrBuilder getSpeechContextsOrBuilder( int index) { if (speechContextsBuilder_ == null) { return speechContexts_.get(index); } else { return speechContextsBuilder_.getMessageOrBuilder(index); } } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsOrBuilderList() { if (speechContextsBuilder_ != null) { return speechContextsBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(speechContexts_); } } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder() { return getSpeechContextsFieldBuilder().addBuilder( com.google.cloud.speech.v1.SpeechContext.getDefaultInstance()); } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public com.google.cloud.speech.v1.SpeechContext.Builder addSpeechContextsBuilder( int index) { return getSpeechContextsFieldBuilder().addBuilder( index, com.google.cloud.speech.v1.SpeechContext.getDefaultInstance()); } /** *
     * *Optional* A means to provide context to assist the speech recognition.
     * 
* * repeated .google.cloud.speech.v1.SpeechContext speech_contexts = 6; */ public java.util.List getSpeechContextsBuilderList() { return getSpeechContextsFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3< com.google.cloud.speech.v1.SpeechContext, com.google.cloud.speech.v1.SpeechContext.Builder, com.google.cloud.speech.v1.SpeechContextOrBuilder> getSpeechContextsFieldBuilder() { if (speechContextsBuilder_ == null) { speechContextsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< com.google.cloud.speech.v1.SpeechContext, com.google.cloud.speech.v1.SpeechContext.Builder, com.google.cloud.speech.v1.SpeechContextOrBuilder>( speechContexts_, ((bitField0_ & 0x00000020) == 0x00000020), getParentForChildren(), isClean()); speechContexts_ = null; } return speechContextsBuilder_; } public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1.RecognitionConfig) } // @@protoc_insertion_point(class_scope:google.cloud.speech.v1.RecognitionConfig) private static final com.google.cloud.speech.v1.RecognitionConfig DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.speech.v1.RecognitionConfig(); } public static com.google.cloud.speech.v1.RecognitionConfig getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { public RecognitionConfig parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RecognitionConfig(input, extensionRegistry); } }; public static com.google.protobuf.Parser parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser getParserForType() { return PARSER; } public com.google.cloud.speech.v1.RecognitionConfig getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
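
As a follow-up to the parse/serialize factory methods and the AudioEncoding helpers defined above, here is a small round-trip sketch (again with illustrative values) of how they are commonly exercised. toByteArray() comes from the protobuf base message class rather than this file.

import com.google.cloud.speech.v1.RecognitionConfig;
import com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding;
import com.google.protobuf.InvalidProtocolBufferException;

public class RecognitionConfigRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    RecognitionConfig original =
        RecognitionConfig.newBuilder()
            .setEncoding(AudioEncoding.FLAC)
            .setSampleRateHertz(44100)
            .setLanguageCode("en-US")
            .build();

    // Serialize to the protobuf wire format and parse it back; parseFrom(byte[])
    // is one of the static factory methods generated above.
    byte[] wire = original.toByteArray();
    RecognitionConfig parsed = RecognitionConfig.parseFrom(wire);

    // The enum field can be read as a typed constant or as the raw wire number;
    // numbers the runtime does not know map to UNRECOGNIZED via getEncoding().
    AudioEncoding encoding = parsed.getEncoding(); // FLAC
    int rawValue = parsed.getEncodingValue();      // 2
    System.out.println(encoding + " = " + rawValue
        + ", round-trip equal: " + original.equals(parsed));
  }
}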



