// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v2;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/api/routing.proto";
import "google/bigtable/v2/data.proto";
import "google/bigtable/v2/request_stats.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";
option csharp_namespace = "Google.Cloud.Bigtable.V2";
option go_package = "cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb";
option java_multiple_files = true;
option java_outer_classname = "BigtableProto";
option java_package = "com.google.bigtable.v2";
option php_namespace = "Google\\Cloud\\Bigtable\\V2";
option ruby_package = "Google::Cloud::Bigtable::V2";
option (google.api.resource_definition) = {
type: "bigtableadmin.googleapis.com/Instance"
pattern: "projects/{project}/instances/{instance}"
};
option (google.api.resource_definition) = {
type: "bigtableadmin.googleapis.com/Table"
pattern: "projects/{project}/instances/{instance}/tables/{table}"
};
option (google.api.resource_definition) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
pattern: "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}"
};
// Service for reading from and writing to existing Bigtable tables.
service Bigtable {
option (google.api.default_host) = "bigtable.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/bigtable.data,"
"https://www.googleapis.com/auth/bigtable.data.readonly,"
"https://www.googleapis.com/auth/cloud-bigtable.data,"
"https://www.googleapis.com/auth/cloud-bigtable.data.readonly,"
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/cloud-platform.read-only";
// Streams back the contents of all requested rows in key order, optionally
// applying the same Reader filter to each. Depending on their size,
// rows and cells may be broken up across multiple responses, but
// atomicity of each row will still be preserved. See the
// ReadRowsResponse documentation for details.
rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows"
body: "*"
additional_bindings {
post: "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows"
body: "*"
}
};
option (google.api.routing) = {
routing_parameters {
field: "table_name"
path_template: "{table_name=projects/*/instances/*/tables/*}"
}
routing_parameters { field: "app_profile_id" }
routing_parameters {
field: "authorized_view_name"
path_template: "{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}"
}
};
option (google.api.method_signature) = "table_name";
option (google.api.method_signature) = "table_name,app_profile_id";
}
// Returns a sample of row keys in the table. The returned row keys will
// delimit contiguous sections of the table of approximately equal size,
// which can be used to break up the data for distributed tasks like
// mapreduces.
rpc SampleRowKeys(SampleRowKeysRequest)
returns (stream SampleRowKeysResponse) {
option (google.api.http) = {
get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys"
additional_bindings {
get: "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys"
}
};
option (google.api.routing) = {
routing_parameters {
field: "table_name"
path_template: "{table_name=projects/*/instances/*/tables/*}"
}
routing_parameters { field: "app_profile_id" }
routing_parameters {
field: "authorized_view_name"
path_template: "{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}"
}
};
option (google.api.method_signature) = "table_name";
option (google.api.method_signature) = "table_name,app_profile_id";
}
// Mutates a row atomically. Cells already present in the row are left
// unchanged unless explicitly changed by `mutation`.
rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow"
body: "*"
additional_bindings {
post: "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow"
body: "*"
}
};
option (google.api.routing) = {
routing_parameters {
field: "table_name"
path_template: "{table_name=projects/*/instances/*/tables/*}"
}
routing_parameters { field: "app_profile_id" }
routing_parameters {
field: "authorized_view_name"
path_template: "{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}"
}
};
option (google.api.method_signature) = "table_name,row_key,mutations";
option (google.api.method_signature) =
"table_name,row_key,mutations,app_profile_id";
}
// Mutates multiple rows in a batch. Each individual row is mutated
// atomically as in MutateRow, but the entire batch is not executed
// atomically.
rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows"
body: "*"
additional_bindings {
post: "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows"
body: "*"
}
};
option (google.api.routing) = {
routing_parameters {
field: "table_name"
path_template: "{table_name=projects/*/instances/*/tables/*}"
}
routing_parameters { field: "app_profile_id" }
routing_parameters {
field: "authorized_view_name"
path_template: "{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}"
}
};
option (google.api.method_signature) = "table_name,entries";
option (google.api.method_signature) = "table_name,entries,app_profile_id";
}
// Mutates a row atomically based on the output of a predicate Reader filter.
rpc CheckAndMutateRow(CheckAndMutateRowRequest)
returns (CheckAndMutateRowResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow"
body: "*"
additional_bindings {
post: "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow"
body: "*"
}
};
option (google.api.routing) = {
routing_parameters {
field: "table_name"
path_template: "{table_name=projects/*/instances/*/tables/*}"
}
routing_parameters { field: "app_profile_id" }
routing_parameters {
field: "authorized_view_name"
path_template: "{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}"
}
};
option (google.api.method_signature) =
"table_name,row_key,predicate_filter,true_mutations,false_mutations";
option (google.api.method_signature) =
"table_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id";
}
// Warm up associated instance metadata for this connection.
// This call is not required but may be useful for connection keep-alive.
rpc PingAndWarm(PingAndWarmRequest) returns (PingAndWarmResponse) {
option (google.api.http) = {
post: "/v2/{name=projects/*/instances/*}:ping"
body: "*"
};
option (google.api.routing) = {
routing_parameters {
field: "name"
path_template: "{name=projects/*/instances/*}"
}
routing_parameters { field: "app_profile_id" }
};
option (google.api.method_signature) = "name";
option (google.api.method_signature) = "name,app_profile_id";
}
// Modifies a row atomically on the server. The method reads the latest
// existing timestamp and value from the specified columns and writes a new
// entry based on pre-defined read/modify/write rules. The new value for the
// timestamp is the greater of the existing timestamp or the current server
// time. The method returns the new contents of all modified cells.
rpc ReadModifyWriteRow(ReadModifyWriteRowRequest)
returns (ReadModifyWriteRowResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow"
body: "*"
additional_bindings {
post: "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow"
body: "*"
}
};
option (google.api.routing) = {
routing_parameters {
field: "table_name"
path_template: "{table_name=projects/*/instances/*/tables/*}"
}
routing_parameters { field: "app_profile_id" }
routing_parameters {
field: "authorized_view_name"
path_template: "{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}"
}
};
option (google.api.method_signature) = "table_name,row_key,rules";
option (google.api.method_signature) =
"table_name,row_key,rules,app_profile_id";
}
// NOTE: This API is intended to be used by Apache Beam BigtableIO.
// Returns the current list of partitions that make up the table's
// change stream. The union of partitions will cover the entire keyspace.
// Partitions can be read with `ReadChangeStream`.
rpc GenerateInitialChangeStreamPartitions(
GenerateInitialChangeStreamPartitionsRequest)
returns (stream GenerateInitialChangeStreamPartitionsResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions"
body: "*"
};
option (google.api.method_signature) = "table_name";
option (google.api.method_signature) = "table_name,app_profile_id";
}
// NOTE: This API is intended to be used by Apache Beam BigtableIO.
// Reads changes from a table's change stream. Changes will
// reflect both user-initiated mutations and mutations that are caused by
// garbage collection.
rpc ReadChangeStream(ReadChangeStreamRequest)
returns (stream ReadChangeStreamResponse) {
option (google.api.http) = {
post: "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream"
body: "*"
};
option (google.api.method_signature) = "table_name";
option (google.api.method_signature) = "table_name,app_profile_id";
}
// Executes a BTQL query against a particular Cloud Bigtable instance.
rpc ExecuteQuery(ExecuteQueryRequest) returns (stream ExecuteQueryResponse) {
option (google.api.http) = {
post: "/v2/{instance_name=projects/*/instances/*}:executeQuery"
body: "*"
};
option (google.api.routing) = {
routing_parameters {
field: "instance_name"
path_template: "{name=projects/*/instances/*}"
}
routing_parameters { field: "app_profile_id" }
};
option (google.api.method_signature) = "instance_name,query";
option (google.api.method_signature) = "instance_name,query,app_profile_id";
}
}
// Request message for Bigtable.ReadRows.
message ReadRowsRequest {
// The desired view into RequestStats that should be returned in the response.
//
// See also: RequestStats message.
enum RequestStatsView {
// The default / unset value. The API will default to the NONE option below.
REQUEST_STATS_VIEW_UNSPECIFIED = 0;
// Do not include any RequestStats in the response. This will leave the
// RequestStats embedded message unset in the response.
REQUEST_STATS_NONE = 1;
// Include the full set of available RequestStats in the response,
// applicable to this read.
REQUEST_STATS_FULL = 2;
}
// Optional. The unique name of the table from which to read.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
string table_name = 1 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// Optional. The unique name of the AuthorizedView from which to read.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>/authorizedViews/<authorized_view>`.
string authorized_view_name = 9 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 5;
// The row keys and/or ranges to read sequentially. If not specified, reads
// from all rows.
RowSet rows = 2;
// The filter to apply to the contents of the specified row(s). If unset,
// reads the entirety of each row.
RowFilter filter = 3;
// The read will stop after committing to N rows' worth of results. The
// default (zero) is to return all results.
int64 rows_limit = 4;
// The view into RequestStats, as described above.
RequestStatsView request_stats_view = 6;
// Experimental API - Please note that this API is currently experimental
// and can change in the future.
//
// Return rows in lexicographic descending order of the row keys. The row
// contents will not be affected by this flag.
//
// Example result set:
//
// [
// {key: "k2", "f:col1": "v1", "f:col2": "v1"},
// {key: "k1", "f:col1": "v2", "f:col2": "v2"}
// ]
bool reversed = 7;
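// An illustrative request sketch in textproto form (resource names and field
// values are hypothetical; `RowSet` and `RowFilter` are defined in
// data.proto):
//
//   table_name: "projects/my-project/instances/my-instance/tables/my-table"
//   rows {
//     row_keys: "user#001"
//     row_ranges { start_key_closed: "user#100" end_key_open: "user#200" }
//   }
//   filter { family_name_regex_filter: "profile" }
//   rows_limit: 50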
}
// Response message for Bigtable.ReadRows.
message ReadRowsResponse {
// Specifies a piece of a row's contents returned as part of the read
// response stream.
message CellChunk {
// The row key for this chunk of data. If the row key is empty,
// this CellChunk is a continuation of the same row as the previous
// CellChunk in the response stream, even if that CellChunk was in a
// previous ReadRowsResponse message.
bytes row_key = 1;
// The column family name for this chunk of data. If this message
// is not present this CellChunk is a continuation of the same column
// family as the previous CellChunk. The empty string can occur as a
// column family name in a response so clients must check
// explicitly for the presence of this message, not just for
// `family_name.value` being non-empty.
google.protobuf.StringValue family_name = 2;
// The column qualifier for this chunk of data. If this message
// is not present, this CellChunk is a continuation of the same column
// as the previous CellChunk. Column qualifiers may be empty so
// clients must check for the presence of this message, not just
// for `qualifier.value` being non-empty.
google.protobuf.BytesValue qualifier = 3;
// The cell's stored timestamp, which also uniquely identifies it
// within its column. Values are always expressed in
// microseconds, but individual tables may set a coarser
// granularity to further restrict the allowed values. For
// example, a table which specifies millisecond granularity will
// only allow values of `timestamp_micros` which are multiples of
// 1000. Timestamps are only set in the first CellChunk per cell
// (for cells split into multiple chunks).
int64 timestamp_micros = 4;
// Labels applied to the cell by a
// [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
// on the first CellChunk per cell.
repeated string labels = 5;
// The value stored in the cell. Cell values can be split across
// multiple CellChunks. In that case only the value field will be
// set in CellChunks after the first: the timestamp and labels
// will only be present in the first CellChunk, even if the first
// CellChunk came in a previous ReadRowsResponse.
bytes value = 6;
// If this CellChunk is part of a chunked cell value and this is
// not the final chunk of that cell, value_size will be set to the
// total length of the cell value. The client can use this size
// to pre-allocate memory to hold the full cell value.
int32 value_size = 7;
// Signals to the client concerning previous CellChunks received.
oneof row_status {
// Indicates that the client should drop all previous chunks for
// `row_key`, as it will be re-read from the beginning.
bool reset_row = 8;
// Indicates that the client can safely process all previous chunks for
// `row_key`, as its data has been fully read.
bool commit_row = 9;
}
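// As an illustration (hypothetical values, textproto form), a single cell
// whose 12-byte value is split across two chunks might stream as:
//
//   chunks {
//     row_key: "r1"
//     family_name { value: "f" }
//     qualifier { value: "col" }
//     timestamp_micros: 1000
//     value: "hello, "
//     value_size: 12
//   }
//   chunks { value: "world" commit_row: true }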
}
// A collection of a row's contents as part of the read response.
repeated CellChunk chunks = 1;
// Optionally the server might return the row key of the last row it
// has scanned. The client can use this to construct a more
// efficient retry request if needed: any row keys or portions of
// ranges less than this row key can be dropped from the request.
// This is primarily useful for cases where the server has read a
// lot of data that was filtered out since the last committed row
// key, allowing the client to skip that work on a retry.
bytes last_scanned_row_key = 2;
//
// If requested, provide enhanced query performance statistics. The semantics
// dictate:
// * request_stats is empty on every (streamed) response, except
// * request_stats has non-empty information after all chunks have been
// streamed, where the ReadRowsResponse message only contains
// request_stats.
// * For example, if a read request would have returned an empty
// response, a single ReadRowsResponse is instead streamed with empty
// chunks and request_stats filled.
//
// Visually, response messages will stream as follows:
// ... -> {chunks: [...]} -> {chunks: [], request_stats: {...}}
// \______________________/ \________________________________/
// Primary response Trailer of RequestStats info
//
// Or if the read did not return any values:
// {chunks: [], request_stats: {...}}
// \________________________________/
// Trailer of RequestStats info
RequestStats request_stats = 3;
}
// Request message for Bigtable.SampleRowKeys.
message SampleRowKeysRequest {
// Optional. The unique name of the table from which to sample row keys.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
string table_name = 1 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// Optional. The unique name of the AuthorizedView from which to sample row
// keys.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>/authorizedViews/<authorized_view>`.
string authorized_view_name = 4 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 2;
}
// Response message for Bigtable.SampleRowKeys.
message SampleRowKeysResponse {
// Sorted streamed sequence of sample row keys in the table. The table might
// have contents before the first row key in the list and after the last one,
// but a key containing the empty string indicates "end of table" and will be
// the last response given, if present.
// Note that row keys in this list may not have ever been written to or read
// from, and users should therefore not make any assumptions about the row key
// structure that are specific to their use case.
bytes row_key = 1;
// Approximate total storage space used by all rows in the table which precede
// `row_key`. Buffering the contents of all rows between two subsequent
// samples would require space roughly equal to the difference in their
// `offset_bytes` fields.
int64 offset_bytes = 2;
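// For example (hypothetical values), if two consecutive responses carry
// `offset_bytes` of 100000000 and 150000000, the rows between their
// `row_key`s occupy roughly 50 MB.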
}
// Request message for Bigtable.MutateRow.
message MutateRowRequest {
// Optional. The unique name of the table to which the mutation should be
// applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
string table_name = 1 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// Optional. The unique name of the AuthorizedView to which the mutation
// should be applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>/authorizedViews/<authorized_view>`.
string authorized_view_name = 6 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 4;
// Required. The key of the row to which the mutation should be applied.
bytes row_key = 2 [(google.api.field_behavior) = REQUIRED];
// Required. Changes to be atomically applied to the specified row. Entries
// are applied in order, meaning that earlier mutations can be masked by later
// ones. Must contain at least one entry and at most 100000.
repeated Mutation mutations = 3 [(google.api.field_behavior) = REQUIRED];
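// An illustrative request sketch in textproto form (resource names and field
// values are hypothetical; `Mutation` is defined in data.proto):
//
//   table_name: "projects/my-project/instances/my-instance/tables/my-table"
//   row_key: "user#001"
//   mutations {
//     set_cell {
//       family_name: "profile"
//       column_qualifier: "name"
//       timestamp_micros: -1  # -1 asks the server to use its current time.
//       value: "Jane"
//     }
//   }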
}
// Response message for Bigtable.MutateRow.
message MutateRowResponse {}
// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
// A mutation for a given row.
message Entry {
// The key of the row to which the `mutations` should be applied.
bytes row_key = 1;
// Required. Changes to be atomically applied to the specified row.
// Mutations are applied in order, meaning that earlier mutations can be
// masked by later ones. You must specify at least one mutation.
repeated Mutation mutations = 2 [(google.api.field_behavior) = REQUIRED];
}
// Optional. The unique name of the table to which the mutations should be
// applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
string table_name = 1 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// Optional. The unique name of the AuthorizedView to which the mutations
// should be applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>/authorizedViews/<authorized_view>`.
string authorized_view_name = 5 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 3;
// Required. The row keys and corresponding mutations to be applied in bulk.
// Each entry is applied as an atomic mutation, but the entries may be
// applied in arbitrary order (even between entries for the same row).
// At least one entry must be specified, and in total the entries can
// contain at most 100000 mutations.
repeated Entry entries = 2 [(google.api.field_behavior) = REQUIRED];
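// An illustrative request sketch in textproto form (resource names and field
// values are hypothetical):
//
//   table_name: "projects/my-project/instances/my-instance/tables/my-table"
//   entries {
//     row_key: "r1"
//     mutations {
//       set_cell { family_name: "f" column_qualifier: "c" value: "v1" }
//     }
//   }
//   entries {
//     row_key: "r2"
//     mutations { delete_from_row {} }
//   }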
}
// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
// The result of applying a passed mutation in the original request.
message Entry {
// The index into the original request's `entries` list of the Entry
// for which a result is being reported.
int64 index = 1;
// The result of the request Entry identified by `index`.
// Depending on how requests are batched during execution, it is possible
// for one Entry to fail due to an error with another Entry. In the event
// that this occurs, the same error will be reported for both entries.
google.rpc.Status status = 2;
}
// One or more results for Entries from the batch request.
repeated Entry entries = 1;
// Information about how the client should limit its rate (QPS). Primarily used
// by supported official Cloud Bigtable clients. If unset, the rate limit info is
// not provided by the server.
optional RateLimitInfo rate_limit_info = 3;
}
// Information about how the client should adjust the load it sends to Bigtable.
message RateLimitInfo {
// Time that clients should wait before adjusting the target rate again.
// If clients adjust rate too frequently, the impact of the previous
// adjustment may not have been taken into account and may
// over-throttle or under-throttle. If clients adjust rate too slowly, they
// will not be responsive to load changes on server side, and may
// over-throttle or under-throttle.
google.protobuf.Duration period = 1;
// If it has been at least one `period` since the last load adjustment, the
// client should multiply the current load by this value to get the new target
// load. For example, if the current load is 100 and `factor` is 0.8, the new
// target load should be 80. After adjusting, the client should ignore
// `factor` until another `period` has passed.
//
// The client can measure its load using any unit that's comparable over time.
// For example, QPS can be used as long as each request involves a similar
// amount of work.
double factor = 2;
}
// Request message for Bigtable.CheckAndMutateRow.
message CheckAndMutateRowRequest {
// Optional. The unique name of the table to which the conditional mutation
// should be applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
string table_name = 1 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// Optional. The unique name of the AuthorizedView to which the conditional
// mutation should be applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>/authorizedViews/<authorized_view>`.
string authorized_view_name = 9 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 7;
// Required. The key of the row to which the conditional mutation should be
// applied.
bytes row_key = 2 [(google.api.field_behavior) = REQUIRED];
// The filter to be applied to the contents of the specified row. Depending
// on whether or not any results are yielded, either `true_mutations` or
// `false_mutations` will be executed. If unset, checks that the row contains
// any values at all.
RowFilter predicate_filter = 6;
// Changes to be atomically applied to the specified row if `predicate_filter`
// yields at least one cell when applied to `row_key`. Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if `false_mutations` is empty, and at most
// 100000.
repeated Mutation true_mutations = 4;
// Changes to be atomically applied to the specified row if `predicate_filter`
// does not yield any cells when applied to `row_key`. Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if `true_mutations` is empty, and at most
// 100000.
repeated Mutation false_mutations = 5;
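// An illustrative request sketch in textproto form (resource names, filter,
// and mutations are hypothetical):
//
//   table_name: "projects/my-project/instances/my-instance/tables/my-table"
//   row_key: "user#001"
//   predicate_filter { column_qualifier_regex_filter: "status" }
//   true_mutations {
//     set_cell { family_name: "f" column_qualifier: "status" value: "seen" }
//   }
//   false_mutations {
//     set_cell { family_name: "f" column_qualifier: "status" value: "new" }
//   }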
}
// Response message for Bigtable.CheckAndMutateRow.
message CheckAndMutateRowResponse {
// Whether or not the request's `predicate_filter` yielded any results for
// the specified row.
bool predicate_matched = 1;
}
// Request message for client connection keep-alive and warming.
message PingAndWarmRequest {
// Required. The unique name of the instance to check permissions for, and to
// respond to. Values are of the form
// `projects/<project>/instances/<instance>`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Instance"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 2;
}
// Response message for Bigtable.PingAndWarm connection keepalive and warming.
message PingAndWarmResponse {}
// Request message for Bigtable.ReadModifyWriteRow.
message ReadModifyWriteRowRequest {
// Optional. The unique name of the table to which the read/modify/write rules
// should be applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
string table_name = 1 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// Optional. The unique name of the AuthorizedView to which the
// read/modify/write rules should be applied.
//
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>/authorizedViews/<authorized_view>`.
string authorized_view_name = 6 [
(google.api.field_behavior) = OPTIONAL,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/AuthorizedView"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
string app_profile_id = 4;
// Required. The key of the row to which the read/modify/write rules should be
// applied.
bytes row_key = 2 [(google.api.field_behavior) = REQUIRED];
// Required. Rules specifying how the specified row's contents are to be
// transformed into writes. Entries are applied in order, meaning that earlier
// rules will affect the results of later ones.
repeated ReadModifyWriteRule rules = 3
[(google.api.field_behavior) = REQUIRED];
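// An illustrative request sketch in textproto form (resource names and field
// values are hypothetical; `ReadModifyWriteRule` is defined in data.proto):
//
//   table_name: "projects/my-project/instances/my-instance/tables/my-table"
//   row_key: "counter#001"
//   rules {
//     family_name: "stats"
//     column_qualifier: "views"
//     increment_amount: 1
//   }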
}
// Response message for Bigtable.ReadModifyWriteRow.
message ReadModifyWriteRowResponse {
// A Row containing the new contents of all cells modified by the request.
Row row = 1;
}
// NOTE: This API is intended to be used by Apache Beam BigtableIO.
// Request message for Bigtable.GenerateInitialChangeStreamPartitions.
message GenerateInitialChangeStreamPartitionsRequest {
// Required. The unique name of the table from which to get change stream
// partitions. Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
// Change streaming must be enabled on the table.
string table_name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
// Single cluster routing must be configured on the profile.
string app_profile_id = 2;
}
// NOTE: This API is intended to be used by Apache Beam BigtableIO.
// Response message for Bigtable.GenerateInitialChangeStreamPartitions.
message GenerateInitialChangeStreamPartitionsResponse {
// A partition of the change stream.
StreamPartition partition = 1;
}
// NOTE: This API is intended to be used by Apache Beam BigtableIO.
// Request message for Bigtable.ReadChangeStream.
message ReadChangeStreamRequest {
// Required. The unique name of the table from which to read a change stream.
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
// Change streaming must be enabled on the table.
string table_name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Table"
}
];
// This value specifies routing for replication. If not specified, the
// "default" application profile will be used.
// Single cluster routing must be configured on the profile.
string app_profile_id = 2;
// The partition to read changes from.
StreamPartition partition = 3;
// Options for describing where we want to start reading from the stream.
oneof start_from {
// Start reading the stream at the specified timestamp. This timestamp must
// be within the change stream retention period, less than or equal to the
// current time, and after change stream creation, whichever is greater.
// This value is inclusive and will be truncated to microsecond granularity.
google.protobuf.Timestamp start_time = 4;
// Tokens that describe how to resume reading a stream where reading
// previously left off. If specified, changes will be read starting at that
// position. Tokens are delivered on the stream as part of `Heartbeat`
// and `CloseStream` messages.
//
// If a single token is provided, the token's partition must exactly match
// the request's partition. If multiple tokens are provided, as in the case
// of a partition merge, the union of the token partitions must exactly
// cover the request's partition. Otherwise, INVALID_ARGUMENT will be
// returned.
StreamContinuationTokens continuation_tokens = 6;
}
// If specified, OK will be returned when the stream advances beyond
// this time. Otherwise, changes will be continuously delivered on the stream.
// This value is inclusive and will be truncated to microsecond granularity.
google.protobuf.Timestamp end_time = 5;
// If specified, the duration between `Heartbeat` messages on the stream.
// Otherwise, defaults to 5 seconds.
google.protobuf.Duration heartbeat_duration = 7;
}
// NOTE: This API is intended to be used by Apache Beam BigtableIO.
// Response message for Bigtable.ReadChangeStream.
message ReadChangeStreamResponse {
// A partial or complete mutation.
message MutationChunk {
// Information about the chunking of this mutation.
// Only `SetCell` mutations can be chunked, and all chunks for a `SetCell`
// will be delivered contiguously with no other mutation types interleaved.
message ChunkInfo {
// The total value size of all the chunks that make up the `SetCell`.
int32 chunked_value_size = 1;
// The byte offset of this chunk into the total value size of the
// mutation.
int32 chunked_value_offset = 2;
// When true, this is the last chunk of a chunked `SetCell`.
bool last_chunk = 3;
}
// If set, then the mutation is a `SetCell` with a chunked value across
// multiple messages.
ChunkInfo chunk_info = 1;
// If this is a continuation of a chunked message (`chunked_value_offset` >
// 0), ignore all fields except the `SetCell`'s value and merge it with
// the previous message by concatenating the value fields.
Mutation mutation = 2;
}
// A message corresponding to one or more mutations to the partition
// being streamed. A single logical `DataChange` message may also be split
// across a sequence of multiple individual messages. Messages other than
// the first in a sequence will only have the `type` and `chunks` fields
// populated, with the final message in the sequence also containing `done`
// set to true.
message DataChange {
// The type of mutation.
enum Type {
// The type is unspecified.
TYPE_UNSPECIFIED = 0;
// A user-initiated mutation.
USER = 1;
// A system-initiated mutation as part of garbage collection.
// https://cloud.google.com/bigtable/docs/garbage-collection
GARBAGE_COLLECTION = 2;
// This is a continuation of a multi-message change.
CONTINUATION = 3;
}
// The type of the mutation.
Type type = 1;
// The cluster where the mutation was applied.
// Not set when `type` is `GARBAGE_COLLECTION`.
string source_cluster_id = 2;
// The row key for all mutations that are part of this `DataChange`.
// If the `DataChange` is chunked across multiple messages, then this field
// will only be set for the first message.
bytes row_key = 3;
// The timestamp at which the mutation was applied on the Bigtable server.
google.protobuf.Timestamp commit_timestamp = 4;
// A value that lets stream consumers reconstruct Bigtable's
// conflict resolution semantics.
// https://cloud.google.com/bigtable/docs/writes#conflict-resolution
// In the event that the same row key, column family, column qualifier, and
// timestamp are modified on different clusters at the same
// `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
// one chosen for the eventually consistent state of the system.
int32 tiebreaker = 5;
// The mutations associated with this change to the partition.
// May contain complete mutations or chunks of a multi-message chunked
// `DataChange` record.
repeated MutationChunk chunks = 6;
// When true, indicates that the entire `DataChange` has been read
// and the client can safely process the message.
bool done = 8;
// An encoded position for this stream's partition to restart reading from.
// This token is for the StreamPartition from the request.
string token = 9;
// An estimate of the commit timestamp that is usually lower than or equal
// to any timestamp for a record that will be delivered in the future on the
// stream. It is possible, under particular circumstances, that a future
// record has a timestamp that is lower than a previously seen timestamp. For
// an example of its usage, see
// https://beam.apache.org/documentation/basics/#watermarks
google.protobuf.Timestamp estimated_low_watermark = 10;
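// An illustrative chunked sequence (hypothetical, abbreviated values in
// textproto form), streamed as two consecutive ReadChangeStreamResponse
// messages:
//
//   data_change {
//     type: USER
//     source_cluster_id: "cluster-1"
//     row_key: "r1"
//     chunks {
//       mutation {
//         set_cell { family_name: "f" column_qualifier: "c1" value: "v1" }
//       }
//     }
//   }
//   data_change {
//     type: CONTINUATION
//     chunks {
//       mutation {
//         set_cell { family_name: "f" column_qualifier: "c2" value: "v2" }
//       }
//     }
//     done: true
//   }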
}
// A periodic message with information that can be used to checkpoint
// the state of a stream.
message Heartbeat {
// A token that can be provided to a subsequent `ReadChangeStream` call
// to pick up reading at the current stream position.
StreamContinuationToken continuation_token = 1;
// An estimate of the commit timestamp that is usually lower than or equal
// to any timestamp for a record that will be delivered in the future on the
// stream. It is possible, under particular circumstances, that a future
// record has a timestamp that is lower than a previously seen timestamp. For
// an example of its usage, see
// https://beam.apache.org/documentation/basics/#watermarks
google.protobuf.Timestamp estimated_low_watermark = 2;
}
// A message indicating that the client should stop reading from the stream.
// If status is OK and `continuation_tokens` & `new_partitions` are empty, the
// stream has finished (for example if there was an `end_time` specified).
// If `continuation_tokens` & `new_partitions` are present, then a change in
// partitioning requires the client to open a new stream for each token to
// resume reading. Example:
// [B, D) ends
// |
// v
// new_partitions: [A, C) [C, E)
// continuation_tokens.partitions: [B,C) [C,D)
// ^---^ ^---^
// ^ ^
// | |
// | StreamContinuationToken 2
// |
// StreamContinuationToken 1
// To read the new partition [A,C), supply the continuation tokens whose
// ranges cover the new partition, for example ContinuationToken[A,B) &
// ContinuationToken[B,C).
message CloseStream {
// The status of the stream.
google.rpc.Status status = 1;
// If non-empty, contains the information needed to resume reading their
// associated partitions.
repeated StreamContinuationToken continuation_tokens = 2;
// If non-empty, contains the new partitions to start reading from, which
// are related to but not necessarily identical to the partitions for the
// above `continuation_tokens`.
repeated StreamPartition new_partitions = 3;
}
// The data or control message on the stream.
oneof stream_record {
// A mutation to the partition.
DataChange data_change = 1;
// A periodic heartbeat message.
Heartbeat heartbeat = 2;
// An indication that the stream should be closed.
CloseStream close_stream = 3;
}
}
// Request message for Bigtable.ExecuteQuery.
message ExecuteQueryRequest {
// Required. The unique name of the instance against which the query should be
// executed.
// Values are of the form `projects/<project>/instances/<instance>`.
string instance_name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "bigtableadmin.googleapis.com/Instance"
}
];
// Optional. This value specifies routing for replication. If not specified,
// the `default` application profile will be used.
string app_profile_id = 2 [(google.api.field_behavior) = OPTIONAL];
// Required. The query string.
string query = 3 [(google.api.field_behavior) = REQUIRED];
// Required. Requested data format for the response.
oneof data_format {
// Protocol buffer format as described by ProtoSchema and ProtoRows
// messages.
ProtoFormat proto_format = 4;
}
// Optional. If this request is resuming a previously interrupted query
// execution, `resume_token` should be copied from the last
// PartialResultSet yielded before the interruption. Doing this
// enables the query execution to resume where the last one left
// off.
// The rest of the request parameters must exactly match the
// request that yielded this token. Otherwise the request will fail.
bytes resume_token = 8 [(google.api.field_behavior) = OPTIONAL];
// Required. `params` contains string keys and Bigtable-typed values that bind
// to placeholders in the query string. In the query string, a parameter
// placeholder consists of the `@` character followed by the parameter name
// (for example, `@firstName`).
//
// For example, if
// `params["firstName"] = bytes_value: "foo" type {bytes_type {}}`
// then `@firstName` will be replaced with googlesql bytes value "foo" in the
// query string during query evaluation.
//
// If Value.kind is not set, the value is treated as the corresponding null
// value in googlesql. For example, if
// `params["firstName"] = type {string_type {}}`
// then `@firstName` will be replaced with a googlesql null string.
//
// Value.type should always be set and no inference of type will be made from
// Value.kind. If Value.type is not set, the request will fail with an
// INVALID_ARGUMENT error.
map<string, Value> params = 7 [(google.api.field_behavior) = REQUIRED];
}
// Response message for Bigtable.ExecuteQuery.
message ExecuteQueryResponse {
// The first response streamed from the server is of type `ResultSetMetadata`
// and includes information about the columns and types of the result set.
// From there on, we stream `PartialResultSet` messages with no additional
// information. `PartialResultSet` contains a `resume_token`, which can be used
// to restart the response if the query is interrupted. When resuming with a
// `resume_token`, the server will not resend the ResultSetMetadata.
oneof response {
// Structure of rows in this response stream. The first (and only the first)
// response streamed from the server will be of this type.
ResultSetMetadata metadata = 1;
// A partial result set with row data potentially including additional
// instructions on how recent past and future partial responses should be
// interpreted.
PartialResultSet results = 2;
}
}