// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.speech.v1;
import "google/api/annotations.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "SpeechProto";
option java_package = "com.google.cloud.speech.v1";
// Service that implements Google Cloud Speech API.
service Speech {
// Perform bidirectional streaming speech recognition on audio using gRPC.
rpc Recognize(stream RecognizeRequest) returns (stream RecognizeResponse);
// Perform non-streaming speech recognition on audio using HTTPS.
rpc NonStreamingRecognize(RecognizeRequest) returns (NonStreamingRecognizeResponse) {
option (google.api.http) = { post: "/v1/speech:recognize" body: "*" };
}
}
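//
// For illustration, a `NonStreamingRecognize` call might POST a JSON body
// like the following to `/v1/speech:recognize` (a hedged sketch: field names
// follow the standard proto3 JSON mapping, and the content value is a
// placeholder for base64-encoded audio bytes):
//
//   {
//     "initialRequest": { "encoding": "FLAC", "sampleRate": 16000 },
//     "audioRequest": { "content": "<base64-encoded audio>" }
//   }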
// `RecognizeRequest` is the only message type sent by the client.
//
// `NonStreamingRecognize` sends only one `RecognizeRequest` message and it
// must contain both an `initial_request` and an `audio_request`.
//
// Streaming `Recognize` sends one or more `RecognizeRequest` messages. The
// first message must contain an `initial_request` and may contain an
// `audio_request`. Any subsequent messages must not contain an
// `initial_request` and must contain an `audio_request`.
message RecognizeRequest {
// The `initial_request` message provides information to the recognizer
// that specifies how to process the request.
//
// The first `RecognizeRequest` message must contain an `initial_request`.
// Any subsequent `RecognizeRequest` messages must not contain an
// `initial_request`.
InitialRecognizeRequest initial_request = 1;
// The audio data to be recognized. For `NonStreamingRecognize`, all the
// audio data must be contained in the first (and only) `RecognizeRequest`
// message. For streaming `Recognize`, sequential chunks of audio data are
// sent in sequential `RecognizeRequest` messages.
AudioRequest audio_request = 2;
}
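// For illustration, a streaming `Recognize` session might send the following
// sequence of `RecognizeRequest` messages, shown in proto text format (a
// sketch; the chunk contents are placeholders):
//
//   message 1: initial_request { encoding: LINEAR16 sample_rate: 16000 }
//   message 2: audio_request { content: "<first chunk of audio bytes>" }
//   message 3: audio_request { content: "<next chunk of audio bytes>" }
//   ...continuing until the client finishes sending audio.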
// The `InitialRecognizeRequest` message provides information to the recognizer
// that specifies how to process the request.
message InitialRecognizeRequest {
// Audio encoding of the data sent in the audio message.
enum AudioEncoding {
// Not specified. Will return [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
ENCODING_UNSPECIFIED = 0;
// Uncompressed 16-bit signed little-endian samples.
// This is the simplest encoding format, useful for getting started.
// However, because it is uncompressed, it is not recommended for deployed
// clients.
LINEAR16 = 1;
// This is the recommended encoding format because it uses lossless
// compression; therefore recognition accuracy is not compromised by a lossy
// codec.
//
// The stream FLAC format is specified at:
// http://flac.sourceforge.net/documentation.html.
// Only 16-bit samples are supported.
// Not all fields in STREAMINFO are supported.
FLAC = 2;
// 8-bit samples that compand 14-bit audio samples using PCMU/mu-law.
MULAW = 3;
// Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 Hz.
AMR = 4;
// Adaptive Multi-Rate Wideband codec. `sample_rate` must be 16000 Hz.
AMR_WB = 5;
}
// [Required] Encoding of audio data sent in all `AudioRequest` messages.
AudioEncoding encoding = 1;
// [Required] Sample rate in Hertz of the audio data sent in all
// `AudioRequest` messages.
// 16000 is optimal. Valid values are 8000-48000.
int32 sample_rate = 2;
// [Optional] The language of the supplied audio as a BCP-47 language tag.
// Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt
// If omitted, defaults to "en-US".
string language_code = 3;
// [Optional] Maximum number of recognition hypotheses to be returned.
// Specifically, the maximum number of `SpeechRecognitionAlternative` messages
// within each `SpeechRecognitionResult`.
// The server may return fewer than `max_alternatives`.
// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
// `1`. If omitted, defaults to `1`.
int32 max_alternatives = 4;
// [Optional] If set to `true`, the server will attempt to filter out
// profanities, replacing all but the initial character in each filtered word
// with asterisks, e.g. "f***". If set to `false` or omitted, profanities
// won't be filtered out.
bool profanity_filter = 5;
// [Optional] If `false` or omitted, the recognizer will detect a single
// spoken utterance, and it will cease recognition when the user stops
// speaking. If `enable_endpointer_events` is `true`, it will return
// `END_OF_UTTERANCE` when it detects that the user has stopped speaking.
// In all cases, it will return no more than one `SpeechRecognitionResult`,
// and set the `is_final` flag to `true`.
//
// If `true`, the recognizer will continue recognition (even if the user
// pauses speaking) until the client sends an `end_of_data` message or the
// maximum time limit has been reached. Multiple
// `SpeechRecognitionResult`s with the `is_final` flag set to `true` may be
// returned to indicate that the recognizer will not return any further
// hypotheses for this portion of the transcript.
bool continuous = 6;
// [Optional] If this parameter is `true`, interim results may be returned as
// they become available.
// If `false` or omitted, only `is_final=true` result(s) are returned.
bool interim_results = 7;
// [Optional] If this parameter is `true`, `EndpointerEvents` may be returned
// as they become available.
// If `false` or omitted, no `EndpointerEvents` are returned.
bool enable_endpointer_events = 8;
// [Optional] URI that points to a file where the recognition result should
// be stored in JSON format. If omitted or empty string, the recognition
// result is returned in the response. Should be specified only for
// `NonStreamingRecognize`. If specified in a `Recognize` request,
// `Recognize` returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
// If specified in a `NonStreamingRecognize` request,
// `NonStreamingRecognize` returns immediately, and the output file
// is created asynchronously once the audio processing completes.
// Currently, only Google Cloud Storage URIs are supported, which must be
// specified in the following format: `gs://bucket_name/object_name`
// (other URI formats return [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
// more information, see [Request URIs](/storage/docs/reference-uris).
string output_uri = 9;
}
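// For illustration, a typical `InitialRecognizeRequest` for a short voice
// query might look like this in proto text format (a sketch; the values are
// examples chosen from the ranges and defaults documented above):
//
//   encoding: LINEAR16
//   sample_rate: 16000        # the optimal rate noted above
//   language_code: "en-US"    # also the default when omitted
//   max_alternatives: 1
//   interim_results: true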
// Contains audio data in the format specified in the `InitialRecognizeRequest`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
message AudioRequest {
// The audio data bytes encoded as specified in
// `InitialRecognizeRequest`. Note: as with all bytes fields, protocol buffers
// use a pure binary representation, whereas JSON representations use base64.
bytes content = 1;
// URI that points to a file that contains audio data bytes as specified in
// `InitialRecognizeRequest`. Currently, only Google Cloud Storage URIs are
// supported, which must be specified in the following format:
// `gs://bucket_name/object_name` (other URI formats return
// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
// [Request URIs](/storage/docs/reference-uris).
string uri = 2;
}
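// For illustration, the two mutually exclusive forms of `AudioRequest` in
// proto text format (a sketch; the bucket and object names are hypothetical):
//
//   audio_request { content: "<raw audio bytes>" }          # inline audio
//   audio_request { uri: "gs://my-bucket/my-audio.flac" }   # audio in GCS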
// `NonStreamingRecognizeResponse` is the only message returned to the client by
// `NonStreamingRecognize`. It contains the result as zero or more sequential
// `RecognizeResponse` messages.
//
// Note that streaming `Recognize` will also return multiple `RecognizeResponse`
// messages, but each message is individually streamed.
message NonStreamingRecognizeResponse {
// [Output-only] Sequential list of messages returned by the recognizer.
repeated RecognizeResponse responses = 1;
}
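// For illustration, a `NonStreamingRecognizeResponse` in JSON might look like
// the following (a sketch; the transcript and confidence value are made up):
//
//   {
//     "responses": [ {
//       "results": [ {
//         "alternatives": [
//           { "transcript": "how old is the Brooklyn Bridge",
//             "confidence": 0.98 }
//         ],
//         "isFinal": true
//       } ]
//     } ]
//   }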
// `RecognizeResponse` is the only message type returned to the client.
message RecognizeResponse {
// Indicates the type of endpointer event.
enum EndpointerEvent {
// No endpointer event specified.
ENDPOINTER_EVENT_UNSPECIFIED = 0;
// Speech has been detected in the audio stream.
START_OF_SPEECH = 1;
// Speech has ceased to be detected in the audio stream.
END_OF_SPEECH = 2;
// The end of the audio stream has been reached, and it is being processed.
END_OF_AUDIO = 3;
// This event is only sent when `continuous` is `false`. It indicates that the
// server has detected the end of the user's speech utterance and expects no
// additional speech. Therefore, the server will not process additional
// audio. The client should stop sending additional audio data.
END_OF_UTTERANCE = 4;
}
// [Output-only] If set, returns a [google.rpc.Status][] message that
// specifies the error for the operation.
google.rpc.Status error = 1;
// [Output-only] For `continuous=false`, this repeated list contains zero or
// one result that corresponds to all of the audio processed so far. For
// `continuous=true`, this repeated list contains zero or more results that
// correspond to consecutive portions of the audio being processed.
// In both cases, contains zero or one `is_final=true` result (the newly
// settled portion), followed by zero or more `is_final=false` results.
repeated SpeechRecognitionResult results = 2;
// [Output-only] Indicates the lowest index in the `results` array that has
// changed. The repeated `SpeechRecognitionResult` results overwrite past
// results at this index and higher.
int32 result_index = 3;
// [Output-only] Indicates the type of endpointer event.
EndpointerEvent endpoint = 4;
}
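// For illustration of `result_index` semantics in a `continuous=true`,
// `interim_results=true` session (a sketch; transcripts, stability values,
// and indices are made up):
//
//   response A: result_index: 0
//               results { alternatives { transcript: "to be" }
//                         stability: 0.5 }
//   response B: result_index: 0
//               results { alternatives { transcript: "to be or not to be" }
//                         is_final: true }
//   response C: result_index: 1
//               results { alternatives { transcript: "that is" }
//                         stability: 0.3 }
//
// Response B overwrites the interim result at index 0 and settles it;
// response C then starts a new portion of the transcript at index 1.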
// A speech recognition result corresponding to a portion of the audio.
message SpeechRecognitionResult {
// [Output-only] May contain one or more recognition hypotheses (up to the
// maximum specified in `max_alternatives`).
repeated SpeechRecognitionAlternative alternatives = 1;
// [Output-only] Set to `true` if this is the final time the speech service
// return this particular `SpeechRecognitionResult`. If `false`, this
// represents an interim result that may change.
bool is_final = 2;
// [Output-only] An estimate of the probability that the recognizer will not
// change its guess about this interim result. Values range from 0.0
// (completely unstable) to 1.0 (completely stable). Note that this is not the
// same as `confidence`, which estimates the probability that a recognition
// result is correct.
// This field is only provided for interim results (`is_final=false`).
// The default of 0.0 is a sentinel value indicating stability was not set.
float stability = 3;
}
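// For illustration of how `stability` and `confidence` differ, here is the
// same portion of audio as an interim and then a final result (a sketch; all
// numbers are made up):
//
//   interim: alternatives { transcript: "hello wor" }
//            stability: 0.8                     # no confidence yet
//   final:   alternatives { transcript: "hello world" confidence: 0.92 }
//            is_final: true                     # no stability on final results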
// Alternative hypotheses (a.k.a. n-best list).
message SpeechRecognitionAlternative {
// [Output-only] Transcript text representing the words that the user spoke.
string transcript = 1;
// [Output-only] The confidence estimate between 0.0 and 1.0. A higher number
// means the system is more confident that the recognition is correct.
// This field is typically provided only for the top hypothesis, and only for
// `is_final=true` results.
// The default of 0.0 is a sentinel value indicating confidence was not set.
float confidence = 2;
}