All downloads are free. The search and download functionalities use the official Maven repository.

com.google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResultOrBuilder Maven / Gradle / Ivy

The newest version!
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: google/cloud/dialogflow/cx/v3beta1/session.proto

// Protobuf Java Version: 3.25.5
package com.google.cloud.dialogflow.cx.v3beta1;

public interface StreamingRecognitionResultOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <pre>
   * Type of the result message.
   * </pre>
   *
   * <code>.google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.MessageType message_type = 1;</code>
   *
   * @return The enum numeric value on the wire for messageType.
   */
  int getMessageTypeValue();

  /**
   * <pre>
   * Type of the result message.
   * </pre>
   *
   * <code>.google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.MessageType message_type = 1;</code>
   *
   * @return The messageType.
   */
  com.google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.MessageType getMessageType();

  /**
   * <pre>
   * Transcript text representing the words that the user spoke.
   * Populated if and only if `message_type` = `TRANSCRIPT`.
   * </pre>
   *
   * <code>string transcript = 2;</code>
   *
   * @return The transcript.
   */
  java.lang.String getTranscript();

  /**
   * <pre>
   * Transcript text representing the words that the user spoke.
   * Populated if and only if `message_type` = `TRANSCRIPT`.
   * </pre>
   *
   * <code>string transcript = 2;</code>
   *
   * @return The bytes for transcript.
   */
  com.google.protobuf.ByteString getTranscriptBytes();

  /**
   * <pre>
   * If `false`, the `StreamingRecognitionResult` represents an
   * interim result that may change. If `true`, the recognizer will not return
   * any further hypotheses about this piece of the audio. May only be populated
   * for `message_type` = `TRANSCRIPT`.
   * </pre>
   *
   * <code>bool is_final = 3;</code>
   *
   * @return The isFinal.
   */
  boolean getIsFinal();

  /**
   * <pre>
   * The Speech confidence between 0.0 and 1.0 for the current portion of audio.
   * A higher number indicates an estimated greater likelihood that the
   * recognized words are correct. The default of 0.0 is a sentinel value
   * indicating that confidence was not set.
   *
   * This field is typically only provided if `is_final` is true and you should
   * not rely on it being accurate or even set.
   * </pre>
   *
   * <code>float confidence = 4;</code>
   *
   * @return The confidence.
   */
  float getConfidence();

  /**
   * <pre>
   * An estimate of the likelihood that the speech recognizer will
   * not change its guess about this interim recognition result:
   * * If the value is unspecified or 0.0, Dialogflow didn't compute the
   *   stability. In particular, Dialogflow will only provide stability for
   *   `TRANSCRIPT` results with `is_final = false`.
   * * Otherwise, the value is in (0.0, 1.0] where 0.0 means completely
   *   unstable and 1.0 means completely stable.
   * </pre>
   *
   * <code>float stability = 6;</code>
   *
   * @return The stability.
   */
  float getStability();

  /**
   * <pre>
   * Word-specific information for the words recognized by Speech in
   * [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
   * Populated if and only if `message_type` = `TRANSCRIPT` and
   * [InputAudioConfig.enable_word_info] is set.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo speech_word_info = 7;</code>
   */
  java.util.List<com.google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo> getSpeechWordInfoList();

  /**
   * <pre>
   * Word-specific information for the words recognized by Speech in
   * [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
   * Populated if and only if `message_type` = `TRANSCRIPT` and
   * [InputAudioConfig.enable_word_info] is set.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo speech_word_info = 7;</code>
   */
  com.google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo getSpeechWordInfo(int index);

  /**
   * <pre>
   * Word-specific information for the words recognized by Speech in
   * [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
   * Populated if and only if `message_type` = `TRANSCRIPT` and
   * [InputAudioConfig.enable_word_info] is set.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo speech_word_info = 7;</code>
   */
  int getSpeechWordInfoCount();

  /**
   * <pre>
   * Word-specific information for the words recognized by Speech in
   * [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
   * Populated if and only if `message_type` = `TRANSCRIPT` and
   * [InputAudioConfig.enable_word_info] is set.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo speech_word_info = 7;</code>
   */
  java.util.List<? extends com.google.cloud.dialogflow.cx.v3beta1.SpeechWordInfoOrBuilder>
      getSpeechWordInfoOrBuilderList();

  /**
   * <pre>
   * Word-specific information for the words recognized by Speech in
   * [transcript][google.cloud.dialogflow.cx.v3beta1.StreamingRecognitionResult.transcript].
   * Populated if and only if `message_type` = `TRANSCRIPT` and
   * [InputAudioConfig.enable_word_info] is set.
   * </pre>
   *
   * <code>repeated .google.cloud.dialogflow.cx.v3beta1.SpeechWordInfo speech_word_info = 7;</code>
   */
  com.google.cloud.dialogflow.cx.v3beta1.SpeechWordInfoOrBuilder getSpeechWordInfoOrBuilder(
      int index);

  /**
   * <pre>
   * Time offset of the end of this Speech recognition result relative to the
   * beginning of the audio. Only populated for `message_type` =
   * `TRANSCRIPT`.
   * </pre>
   *
   * <code>.google.protobuf.Duration speech_end_offset = 8;</code>
   *
   * @return Whether the speechEndOffset field is set.
   */
  boolean hasSpeechEndOffset();

  /**
   * <pre>
   * Time offset of the end of this Speech recognition result relative to the
   * beginning of the audio. Only populated for `message_type` =
   * `TRANSCRIPT`.
   * </pre>
   *
   * <code>.google.protobuf.Duration speech_end_offset = 8;</code>
   *
   * @return The speechEndOffset.
   */
  com.google.protobuf.Duration getSpeechEndOffset();

  /**
   * <pre>
   * Time offset of the end of this Speech recognition result relative to the
   * beginning of the audio. Only populated for `message_type` =
   * `TRANSCRIPT`.
   * </pre>
   *
   * <code>.google.protobuf.Duration speech_end_offset = 8;</code>
   */
  com.google.protobuf.DurationOrBuilder getSpeechEndOffsetOrBuilder();

  /**
   * <pre>
   * Detected language code for the transcript.
   * </pre>
   *
   * <code>string language_code = 10;</code>
   *
   * @return The languageCode.
   */
  java.lang.String getLanguageCode();

  /**
   * <pre>
   * Detected language code for the transcript.
   * </pre>
   *
   * <code>string language_code = 10;</code>
   *
   * @return The bytes for languageCode.
   */
  com.google.protobuf.ByteString getLanguageCodeBytes();
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy