
/*
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/bigtable/v2/data.proto
// Protobuf Java Version: 3.25.8
package com.google.bigtable.v2;
/**
*
*
*
* A partial result set from the streaming query API.
* Cloud Bigtable clients buffer partial results received in this message until
* a `resume_token` is received.
*
* The pseudocode below describes how to buffer and parse a stream of
* `PartialResultSet` messages.
*
* Having:
* - queue of row results waiting to be returned `queue`
* - extensible buffer of bytes `buffer`
* - a place to keep track of the most recent `resume_token`
* for each PartialResultSet `p` received {
* if p.reset {
* ensure `queue` is empty
* ensure `buffer` is empty
* }
* if p.estimated_batch_size != 0 {
* (optional) ensure `buffer` is sized to at least `p.estimated_batch_size`
* }
* if `p.proto_rows_batch` is set {
* append `p.proto_rows_batch.bytes` to `buffer`
* }
* if p.batch_checksum is set and `buffer` is not empty {
* validate the checksum matches the contents of `buffer`
* (see comments on `batch_checksum`)
* parse `buffer` as `ProtoRows` message, clearing `buffer`
* add parsed rows to end of `queue`
* }
* if p.resume_token is set {
* release results in `queue`
* save `p.resume_token` in `resume_token`
* }
* }
*
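* A minimal Java sketch of this loop is shown below. It is illustrative only,
* not part of the generated API: it assumes the stream is consumed as an
* `Iterator<PartialResultSet>`, that `ProtoRowsBatch` exposes its serialized
* chunk via `getBatchData()`, and that `yieldToCaller` is a hypothetical
* helper that releases buffered rows to the application.
*
*   ByteString buffer = ByteString.EMPTY;
*   Deque<ProtoRows> queue = new ArrayDeque<>();
*   ByteString resumeToken = ByteString.EMPTY;
*   while (stream.hasNext()) {
*     PartialResultSet p = stream.next();
*     if (p.getReset()) {
*       // Discard everything buffered since the last resume_token.
*       queue.clear();
*       buffer = ByteString.EMPTY;
*     }
*     if (p.hasProtoRowsBatch()) {
*       // Chunks are concatenated; an individual chunk may not parse on its own.
*       buffer = buffer.concat(p.getProtoRowsBatch().getBatchData());
*     }
*     if (p.hasBatchChecksum() && !buffer.isEmpty()) {
*       // Verify CRC32C over `buffer` here (see `batch_checksum`), then parse
*       // the completed batch and clear the buffer.
*       queue.add(ProtoRows.parseFrom(buffer));
*       buffer = ByteString.EMPTY;
*     }
*     if (!p.getResumeToken().isEmpty()) {
*       resumeToken = p.getResumeToken();
*       yieldToCaller(queue);
*       queue.clear();
*     }
*   }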
*
* Protobuf type {@code google.bigtable.v2.PartialResultSet}
*/
public final class PartialResultSet extends com.google.protobuf.GeneratedMessageV3
implements
// @@protoc_insertion_point(message_implements:google.bigtable.v2.PartialResultSet)
PartialResultSetOrBuilder {
private static final long serialVersionUID = 0L;
// Use PartialResultSet.newBuilder() to construct.
private PartialResultSet(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
super(builder);
}
private PartialResultSet() {
resumeToken_ = com.google.protobuf.ByteString.EMPTY;
}
@java.lang.Override
@SuppressWarnings({"unused"})
protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
return new PartialResultSet();
}
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.bigtable.v2.DataProto
.internal_static_google_bigtable_v2_PartialResultSet_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.bigtable.v2.DataProto
.internal_static_google_bigtable_v2_PartialResultSet_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.bigtable.v2.PartialResultSet.class,
com.google.bigtable.v2.PartialResultSet.Builder.class);
}
private int bitField0_;
private int partialRowsCase_ = 0;
@SuppressWarnings("serial")
private java.lang.Object partialRows_;
public enum PartialRowsCase
implements
com.google.protobuf.Internal.EnumLite,
com.google.protobuf.AbstractMessage.InternalOneOfEnum {
PROTO_ROWS_BATCH(3),
PARTIALROWS_NOT_SET(0);
private final int value;
private PartialRowsCase(int value) {
this.value = value;
}
/**
* @param value The number of the enum to look for.
* @return The enum associated with the given number.
* @deprecated Use {@link #forNumber(int)} instead.
*/
@java.lang.Deprecated
public static PartialRowsCase valueOf(int value) {
return forNumber(value);
}
public static PartialRowsCase forNumber(int value) {
switch (value) {
case 3:
return PROTO_ROWS_BATCH;
case 0:
return PARTIALROWS_NOT_SET;
default:
return null;
}
}
public int getNumber() {
return this.value;
}
};
public PartialRowsCase getPartialRowsCase() {
return PartialRowsCase.forNumber(partialRowsCase_);
}
public static final int PROTO_ROWS_BATCH_FIELD_NUMBER = 3;
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*
* @return Whether the protoRowsBatch field is set.
*/
@java.lang.Override
public boolean hasProtoRowsBatch() {
return partialRowsCase_ == 3;
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*
* @return The protoRowsBatch.
*/
@java.lang.Override
public com.google.bigtable.v2.ProtoRowsBatch getProtoRowsBatch() {
if (partialRowsCase_ == 3) {
return (com.google.bigtable.v2.ProtoRowsBatch) partialRows_;
}
return com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance();
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
@java.lang.Override
public com.google.bigtable.v2.ProtoRowsBatchOrBuilder getProtoRowsBatchOrBuilder() {
if (partialRowsCase_ == 3) {
return (com.google.bigtable.v2.ProtoRowsBatch) partialRows_;
}
return com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance();
}
public static final int BATCH_CHECKSUM_FIELD_NUMBER = 6;
private int batchChecksum_ = 0;
/**
*
*
*
* CRC32C checksum of concatenated `partial_rows` data for the current batch.
*
* When present, the buffered data from `partial_rows` forms a complete
* parseable message of the appropriate type.
*
* The client should mark the end of a parseable message and prepare to
* receive a new one starting from the next `PartialResultSet` message.
* Clients must verify the checksum of the serialized batch before yielding it
* to the caller.
*
* This does NOT mean the values can be yielded to the callers since a
* `resume_token` is required to safely do so.
*
* If `resume_token` is non-empty and any data has been received since the
* last one, this field is guaranteed to be non-empty. In other words, clients
* may assume that a batch will never cross a `resume_token` boundary.
*
*
* optional uint32 batch_checksum = 6;
*
* @return Whether the batchChecksum field is set.
*/
@java.lang.Override
public boolean hasBatchChecksum() {
return ((bitField0_ & 0x00000001) != 0);
}
/**
*
*
*
* CRC32C checksum of concatenated `partial_rows` data for the current batch.
*
* When present, the buffered data from `partial_rows` forms a complete
* parseable message of the appropriate type.
*
* The client should mark the end of a parseable message and prepare to
* receive a new one starting from the next `PartialResultSet` message.
* Clients must verify the checksum of the serialized batch before yielding it
* to the caller.
*
* This does NOT mean the values can be yielded to the callers since a
* `resume_token` is required to safely do so.
*
* If `resume_token` is non-empty and any data has been received since the
* last one, this field is guaranteed to be non-empty. In other words, clients
* may assume that a batch will never cross a `resume_token` boundary.
*
*
* optional uint32 batch_checksum = 6;
*
* @return The batchChecksum.
*/
@java.lang.Override
public int getBatchChecksum() {
return batchChecksum_;
}
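// Editorial sketch, not generated code: one way a client might verify
// `batch_checksum` against the bytes buffered from `partial_rows`. It assumes
// JDK 9+ `java.util.zip.CRC32C` and a `ByteString buffer` holding the
// concatenated `proto_rows_batch` chunks of the current batch.
//
//   java.util.zip.CRC32C crc32c = new java.util.zip.CRC32C();
//   crc32c.update(buffer.asReadOnlyByteBuffer());
//   if ((int) crc32c.getValue() != partialResultSet.getBatchChecksum()) {
//     throw new java.io.IOException("PartialResultSet batch checksum mismatch");
//   }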
public static final int RESUME_TOKEN_FIELD_NUMBER = 5;
private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY;
/**
*
*
*
* An opaque token sent by the server to allow query resumption and signal
* that the buffered values constructed from received `partial_rows` can be
* yielded to the caller. Clients can provide this token in a subsequent
* request to resume the result stream from the current point.
*
* When `resume_token` is non-empty, the buffered values received from
* `partial_rows` since the last non-empty `resume_token` can be yielded to
* the callers, provided that the client keeps the value of `resume_token` and
* uses it on subsequent retries.
*
* A `resume_token` may be sent without information in `partial_rows` to
* checkpoint the progress of a sparse query. Any previous `partial_rows` data
* should still be yielded in this case, and the new `resume_token` should be
* saved for future retries as normal.
*
* A `resume_token` will only be sent on a boundary where there is either no
* ongoing result batch, or `batch_checksum` is also populated.
*
* The server will also send a sentinel `resume_token` when the last batch of
* `partial_rows` is sent. If the client retries the ExecuteQueryRequest with
* the sentinel `resume_token`, the server will emit it again without any
* data in `partial_rows`, then return OK.
*
*
* bytes resume_token = 5;
*
* @return The resumeToken.
*/
@java.lang.Override
public com.google.protobuf.ByteString getResumeToken() {
return resumeToken_;
}
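// Editorial sketch, not generated code: resuming a query from the last saved
// token after a transient failure. It assumes the corresponding
// `ExecuteQueryRequest` carries a `resume_token` field with a `setResumeToken`
// setter; buffered-but-unreleased data from before the failure must be
// discarded, since the server replays from the token.
//
//   // lastResumeToken is the most recent non-empty getResumeToken() value.
//   ExecuteQueryRequest retry =
//       originalRequest.toBuilder().setResumeToken(lastResumeToken).build();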
public static final int RESET_FIELD_NUMBER = 7;
private boolean reset_ = false;
/**
*
*
*
* If `true`, any data buffered since the last non-empty `resume_token` must
* be discarded before the other parts of this message, if any, are handled.
*
*
* bool reset = 7;
*
* @return The reset.
*/
@java.lang.Override
public boolean getReset() {
return reset_;
}
public static final int ESTIMATED_BATCH_SIZE_FIELD_NUMBER = 4;
private int estimatedBatchSize_ = 0;
/**
*
*
*
* Estimated size of the buffer required to hold the next batch of results.
*
* This value will be sent with the first `partial_rows` of a batch. That is,
* on the first `partial_rows` received in a stream, on the first message
* after a `batch_checksum` message, and any time `reset` is true.
*
* The client can use this estimate to allocate a buffer for the next batch of
* results. This helps minimize the number of allocations required, though the
* buffer size may still need to be increased if the estimate is too low.
*
*
* int32 estimated_batch_size = 4;
*
* @return The estimatedBatchSize.
*/
@java.lang.Override
public int getEstimatedBatchSize() {
return estimatedBatchSize_;
}
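// Editorial sketch, not generated code: using the estimate to pre-size the
// accumulation buffer for the next batch. The value is only a hint, so the
// buffer must still be able to grow; `getBatchData()` on `ProtoRowsBatch` is
// assumed from the data.proto definition.
//
//   int hint = partialResultSet.getEstimatedBatchSize();
//   java.io.ByteArrayOutputStream buffer =
//       new java.io.ByteArrayOutputStream(Math.max(hint, 32));
//   byte[] chunk = p.getProtoRowsBatch().getBatchData().toByteArray();
//   buffer.write(chunk, 0, chunk.length);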
private byte memoizedIsInitialized = -1;
@java.lang.Override
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized == 1) return true;
if (isInitialized == 0) return false;
memoizedIsInitialized = 1;
return true;
}
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
if (partialRowsCase_ == 3) {
output.writeMessage(3, (com.google.bigtable.v2.ProtoRowsBatch) partialRows_);
}
if (estimatedBatchSize_ != 0) {
output.writeInt32(4, estimatedBatchSize_);
}
if (!resumeToken_.isEmpty()) {
output.writeBytes(5, resumeToken_);
}
if (((bitField0_ & 0x00000001) != 0)) {
output.writeUInt32(6, batchChecksum_);
}
if (reset_ != false) {
output.writeBool(7, reset_);
}
getUnknownFields().writeTo(output);
}
@java.lang.Override
public int getSerializedSize() {
int size = memoizedSize;
if (size != -1) return size;
size = 0;
if (partialRowsCase_ == 3) {
size +=
com.google.protobuf.CodedOutputStream.computeMessageSize(
3, (com.google.bigtable.v2.ProtoRowsBatch) partialRows_);
}
if (estimatedBatchSize_ != 0) {
size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, estimatedBatchSize_);
}
if (!resumeToken_.isEmpty()) {
size += com.google.protobuf.CodedOutputStream.computeBytesSize(5, resumeToken_);
}
if (((bitField0_ & 0x00000001) != 0)) {
size += com.google.protobuf.CodedOutputStream.computeUInt32Size(6, batchChecksum_);
}
if (reset_ != false) {
size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, reset_);
}
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof com.google.bigtable.v2.PartialResultSet)) {
return super.equals(obj);
}
com.google.bigtable.v2.PartialResultSet other = (com.google.bigtable.v2.PartialResultSet) obj;
if (hasBatchChecksum() != other.hasBatchChecksum()) return false;
if (hasBatchChecksum()) {
if (getBatchChecksum() != other.getBatchChecksum()) return false;
}
if (!getResumeToken().equals(other.getResumeToken())) return false;
if (getReset() != other.getReset()) return false;
if (getEstimatedBatchSize() != other.getEstimatedBatchSize()) return false;
if (!getPartialRowsCase().equals(other.getPartialRowsCase())) return false;
switch (partialRowsCase_) {
case 3:
if (!getProtoRowsBatch().equals(other.getProtoRowsBatch())) return false;
break;
case 0:
default:
}
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptor().hashCode();
if (hasBatchChecksum()) {
hash = (37 * hash) + BATCH_CHECKSUM_FIELD_NUMBER;
hash = (53 * hash) + getBatchChecksum();
}
hash = (37 * hash) + RESUME_TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getResumeToken().hashCode();
hash = (37 * hash) + RESET_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getReset());
hash = (37 * hash) + ESTIMATED_BATCH_SIZE_FIELD_NUMBER;
hash = (53 * hash) + getEstimatedBatchSize();
switch (partialRowsCase_) {
case 3:
hash = (37 * hash) + PROTO_ROWS_BATCH_FIELD_NUMBER;
hash = (53 * hash) + getProtoRowsBatch().hashCode();
break;
case 0:
default:
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(java.nio.ByteBuffer data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(java.io.InputStream input)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.bigtable.v2.PartialResultSet parseDelimitedFrom(
java.io.InputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
}
public static com.google.bigtable.v2.PartialResultSet parseDelimitedFrom(
java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
PARSER, input, extensionRegistry);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
com.google.protobuf.CodedInputStream input) throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
}
public static com.google.bigtable.v2.PartialResultSet parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
PARSER, input, extensionRegistry);
}
@java.lang.Override
public Builder newBuilderForType() {
return newBuilder();
}
public static Builder newBuilder() {
return DEFAULT_INSTANCE.toBuilder();
}
public static Builder newBuilder(com.google.bigtable.v2.PartialResultSet prototype) {
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
}
@java.lang.Override
public Builder toBuilder() {
return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
}
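// Editorial sketch, not generated code: constructing a PartialResultSet with
// the builder, e.g. for a fake server in tests. `setBatchData` on
// ProtoRowsBatch.Builder is assumed from the data.proto definition; the other
// setters appear in the Builder below.
//
//   PartialResultSet prs =
//       PartialResultSet.newBuilder()
//           .setProtoRowsBatch(ProtoRowsBatch.newBuilder().setBatchData(serializedRows))
//           .setBatchChecksum(checksum)
//           .setResumeToken(ByteString.copyFromUtf8("token-1"))
//           .build();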
@java.lang.Override
protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
*
*
*
* A partial result set from the streaming query API.
* Cloud Bigtable clients buffer partial results received in this message until
* a `resume_token` is received.
*
* The pseudocode below describes how to buffer and parse a stream of
* `PartialResultSet` messages.
*
* Having:
* - queue of row results waiting to be returned `queue`
* - extensible buffer of bytes `buffer`
* - a place to keep track of the most recent `resume_token`
* for each PartialResultSet `p` received {
* if p.reset {
* ensure `queue` is empty
* ensure `buffer` is empty
* }
* if p.estimated_batch_size != 0 {
* (optional) ensure `buffer` is sized to at least `p.estimated_batch_size`
* }
* if `p.proto_rows_batch` is set {
* append `p.proto_rows_batch.bytes` to `buffer`
* }
* if p.batch_checksum is set and `buffer` is not empty {
* validate the checksum matches the contents of `buffer`
* (see comments on `batch_checksum`)
* parse `buffer` as `ProtoRows` message, clearing `buffer`
* add parsed rows to end of `queue`
* }
* if p.resume_token is set {
* release results in `queue`
* save `p.resume_token` in `resume_token`
* }
* }
*
*
* Protobuf type {@code google.bigtable.v2.PartialResultSet}
*/
public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
implements
// @@protoc_insertion_point(builder_implements:google.bigtable.v2.PartialResultSet)
com.google.bigtable.v2.PartialResultSetOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
return com.google.bigtable.v2.DataProto
.internal_static_google_bigtable_v2_PartialResultSet_descriptor;
}
@java.lang.Override
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internalGetFieldAccessorTable() {
return com.google.bigtable.v2.DataProto
.internal_static_google_bigtable_v2_PartialResultSet_fieldAccessorTable
.ensureFieldAccessorsInitialized(
com.google.bigtable.v2.PartialResultSet.class,
com.google.bigtable.v2.PartialResultSet.Builder.class);
}
// Construct using com.google.bigtable.v2.PartialResultSet.newBuilder()
private Builder() {}
private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
super(parent);
}
@java.lang.Override
public Builder clear() {
super.clear();
bitField0_ = 0;
if (protoRowsBatchBuilder_ != null) {
protoRowsBatchBuilder_.clear();
}
batchChecksum_ = 0;
resumeToken_ = com.google.protobuf.ByteString.EMPTY;
reset_ = false;
estimatedBatchSize_ = 0;
partialRowsCase_ = 0;
partialRows_ = null;
return this;
}
@java.lang.Override
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
return com.google.bigtable.v2.DataProto
.internal_static_google_bigtable_v2_PartialResultSet_descriptor;
}
@java.lang.Override
public com.google.bigtable.v2.PartialResultSet getDefaultInstanceForType() {
return com.google.bigtable.v2.PartialResultSet.getDefaultInstance();
}
@java.lang.Override
public com.google.bigtable.v2.PartialResultSet build() {
com.google.bigtable.v2.PartialResultSet result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
@java.lang.Override
public com.google.bigtable.v2.PartialResultSet buildPartial() {
com.google.bigtable.v2.PartialResultSet result =
new com.google.bigtable.v2.PartialResultSet(this);
if (bitField0_ != 0) {
buildPartial0(result);
}
buildPartialOneofs(result);
onBuilt();
return result;
}
private void buildPartial0(com.google.bigtable.v2.PartialResultSet result) {
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000002) != 0)) {
result.batchChecksum_ = batchChecksum_;
to_bitField0_ |= 0x00000001;
}
if (((from_bitField0_ & 0x00000004) != 0)) {
result.resumeToken_ = resumeToken_;
}
if (((from_bitField0_ & 0x00000008) != 0)) {
result.reset_ = reset_;
}
if (((from_bitField0_ & 0x00000010) != 0)) {
result.estimatedBatchSize_ = estimatedBatchSize_;
}
result.bitField0_ |= to_bitField0_;
}
private void buildPartialOneofs(com.google.bigtable.v2.PartialResultSet result) {
result.partialRowsCase_ = partialRowsCase_;
result.partialRows_ = this.partialRows_;
if (partialRowsCase_ == 3 && protoRowsBatchBuilder_ != null) {
result.partialRows_ = protoRowsBatchBuilder_.build();
}
}
@java.lang.Override
public Builder clone() {
return super.clone();
}
@java.lang.Override
public Builder setField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.setField(field, value);
}
@java.lang.Override
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
return super.clearField(field);
}
@java.lang.Override
public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
return super.clearOneof(oneof);
}
@java.lang.Override
public Builder setRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
return super.setRepeatedField(field, index, value);
}
@java.lang.Override
public Builder addRepeatedField(
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
return super.addRepeatedField(field, value);
}
@java.lang.Override
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof com.google.bigtable.v2.PartialResultSet) {
return mergeFrom((com.google.bigtable.v2.PartialResultSet) other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(com.google.bigtable.v2.PartialResultSet other) {
if (other == com.google.bigtable.v2.PartialResultSet.getDefaultInstance()) return this;
if (other.hasBatchChecksum()) {
setBatchChecksum(other.getBatchChecksum());
}
if (other.getResumeToken() != com.google.protobuf.ByteString.EMPTY) {
setResumeToken(other.getResumeToken());
}
if (other.getReset() != false) {
setReset(other.getReset());
}
if (other.getEstimatedBatchSize() != 0) {
setEstimatedBatchSize(other.getEstimatedBatchSize());
}
switch (other.getPartialRowsCase()) {
case PROTO_ROWS_BATCH:
{
mergeProtoRowsBatch(other.getProtoRowsBatch());
break;
}
case PARTIALROWS_NOT_SET:
{
break;
}
}
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
}
@java.lang.Override
public final boolean isInitialized() {
return true;
}
@java.lang.Override
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
if (extensionRegistry == null) {
throw new java.lang.NullPointerException();
}
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
case 26:
{
input.readMessage(getProtoRowsBatchFieldBuilder().getBuilder(), extensionRegistry);
partialRowsCase_ = 3;
break;
} // case 26
case 32:
{
estimatedBatchSize_ = input.readInt32();
bitField0_ |= 0x00000010;
break;
} // case 32
case 42:
{
resumeToken_ = input.readBytes();
bitField0_ |= 0x00000004;
break;
} // case 42
case 48:
{
batchChecksum_ = input.readUInt32();
bitField0_ |= 0x00000002;
break;
} // case 48
case 56:
{
reset_ = input.readBool();
bitField0_ |= 0x00000008;
break;
} // case 56
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
done = true; // was an endgroup tag
}
break;
} // default:
} // switch (tag)
} // while (!done)
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.unwrapIOException();
} finally {
onChanged();
} // finally
return this;
}
private int partialRowsCase_ = 0;
private java.lang.Object partialRows_;
public PartialRowsCase getPartialRowsCase() {
return PartialRowsCase.forNumber(partialRowsCase_);
}
public Builder clearPartialRows() {
partialRowsCase_ = 0;
partialRows_ = null;
onChanged();
return this;
}
private int bitField0_;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.bigtable.v2.ProtoRowsBatch,
com.google.bigtable.v2.ProtoRowsBatch.Builder,
com.google.bigtable.v2.ProtoRowsBatchOrBuilder>
protoRowsBatchBuilder_;
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*
* @return Whether the protoRowsBatch field is set.
*/
@java.lang.Override
public boolean hasProtoRowsBatch() {
return partialRowsCase_ == 3;
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*
* @return The protoRowsBatch.
*/
@java.lang.Override
public com.google.bigtable.v2.ProtoRowsBatch getProtoRowsBatch() {
if (protoRowsBatchBuilder_ == null) {
if (partialRowsCase_ == 3) {
return (com.google.bigtable.v2.ProtoRowsBatch) partialRows_;
}
return com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance();
} else {
if (partialRowsCase_ == 3) {
return protoRowsBatchBuilder_.getMessage();
}
return com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance();
}
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
public Builder setProtoRowsBatch(com.google.bigtable.v2.ProtoRowsBatch value) {
if (protoRowsBatchBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
partialRows_ = value;
onChanged();
} else {
protoRowsBatchBuilder_.setMessage(value);
}
partialRowsCase_ = 3;
return this;
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
public Builder setProtoRowsBatch(
com.google.bigtable.v2.ProtoRowsBatch.Builder builderForValue) {
if (protoRowsBatchBuilder_ == null) {
partialRows_ = builderForValue.build();
onChanged();
} else {
protoRowsBatchBuilder_.setMessage(builderForValue.build());
}
partialRowsCase_ = 3;
return this;
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
public Builder mergeProtoRowsBatch(com.google.bigtable.v2.ProtoRowsBatch value) {
if (protoRowsBatchBuilder_ == null) {
if (partialRowsCase_ == 3
&& partialRows_ != com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance()) {
partialRows_ =
com.google.bigtable.v2.ProtoRowsBatch.newBuilder(
(com.google.bigtable.v2.ProtoRowsBatch) partialRows_)
.mergeFrom(value)
.buildPartial();
} else {
partialRows_ = value;
}
onChanged();
} else {
if (partialRowsCase_ == 3) {
protoRowsBatchBuilder_.mergeFrom(value);
} else {
protoRowsBatchBuilder_.setMessage(value);
}
}
partialRowsCase_ = 3;
return this;
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
public Builder clearProtoRowsBatch() {
if (protoRowsBatchBuilder_ == null) {
if (partialRowsCase_ == 3) {
partialRowsCase_ = 0;
partialRows_ = null;
onChanged();
}
} else {
if (partialRowsCase_ == 3) {
partialRowsCase_ = 0;
partialRows_ = null;
}
protoRowsBatchBuilder_.clear();
}
return this;
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
public com.google.bigtable.v2.ProtoRowsBatch.Builder getProtoRowsBatchBuilder() {
return getProtoRowsBatchFieldBuilder().getBuilder();
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
@java.lang.Override
public com.google.bigtable.v2.ProtoRowsBatchOrBuilder getProtoRowsBatchOrBuilder() {
if ((partialRowsCase_ == 3) && (protoRowsBatchBuilder_ != null)) {
return protoRowsBatchBuilder_.getMessageOrBuilder();
} else {
if (partialRowsCase_ == 3) {
return (com.google.bigtable.v2.ProtoRowsBatch) partialRows_;
}
return com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance();
}
}
/**
*
*
*
* Partial rows in serialized ProtoRows format.
*
*
* .google.bigtable.v2.ProtoRowsBatch proto_rows_batch = 3;
*/
private com.google.protobuf.SingleFieldBuilderV3<
com.google.bigtable.v2.ProtoRowsBatch,
com.google.bigtable.v2.ProtoRowsBatch.Builder,
com.google.bigtable.v2.ProtoRowsBatchOrBuilder>
getProtoRowsBatchFieldBuilder() {
if (protoRowsBatchBuilder_ == null) {
if (!(partialRowsCase_ == 3)) {
partialRows_ = com.google.bigtable.v2.ProtoRowsBatch.getDefaultInstance();
}
protoRowsBatchBuilder_ =
new com.google.protobuf.SingleFieldBuilderV3<
com.google.bigtable.v2.ProtoRowsBatch,
com.google.bigtable.v2.ProtoRowsBatch.Builder,
com.google.bigtable.v2.ProtoRowsBatchOrBuilder>(
(com.google.bigtable.v2.ProtoRowsBatch) partialRows_,
getParentForChildren(),
isClean());
partialRows_ = null;
}
partialRowsCase_ = 3;
onChanged();
return protoRowsBatchBuilder_;
}
private int batchChecksum_;
/**
*
*
*
* CRC32C checksum of concatenated `partial_rows` data for the current batch.
*
* When present, the buffered data from `partial_rows` forms a complete
* parseable message of the appropriate type.
*
* The client should mark the end of a parseable message and prepare to
* receive a new one starting from the next `PartialResultSet` message.
* Clients must verify the checksum of the serialized batch before yielding it
* to the caller.
*
* This does NOT mean the values can be yielded to the callers since a
* `resume_token` is required to safely do so.
*
* If `resume_token` is non-empty and any data has been received since the
* last one, this field is guaranteed to be non-empty. In other words, clients
* may assume that a batch will never cross a `resume_token` boundary.
*
*
* optional uint32 batch_checksum = 6;
*
* @return Whether the batchChecksum field is set.
*/
@java.lang.Override
public boolean hasBatchChecksum() {
return ((bitField0_ & 0x00000002) != 0);
}
/**
*
*
*
* CRC32C checksum of concatenated `partial_rows` data for the current batch.
*
* When present, the buffered data from `partial_rows` forms a complete
* parseable message of the appropriate type.
*
* The client should mark the end of a parseable message and prepare to
* receive a new one starting from the next `PartialResultSet` message.
* Clients must verify the checksum of the serialized batch before yielding it
* to the caller.
*
* This does NOT mean the values can be yielded to the callers since a
* `resume_token` is required to safely do so.
*
* If `resume_token` is non-empty and any data has been received since the
* last one, this field is guaranteed to be non-empty. In other words, clients
* may assume that a batch will never cross a `resume_token` boundary.
*
*
* optional uint32 batch_checksum = 6;
*
* @return The batchChecksum.
*/
@java.lang.Override
public int getBatchChecksum() {
return batchChecksum_;
}
/**
*
*
*
* CRC32C checksum of concatenated `partial_rows` data for the current batch.
*
* When present, the buffered data from `partial_rows` forms a complete
* parseable message of the appropriate type.
*
* The client should mark the end of a parseable message and prepare to
* receive a new one starting from the next `PartialResultSet` message.
* Clients must verify the checksum of the serialized batch before yielding it
* to the caller.
*
* This does NOT mean the values can be yielded to the callers since a
* `resume_token` is required to safely do so.
*
* If `resume_token` is non-empty and any data has been received since the
* last one, this field is guaranteed to be non-empty. In other words, clients
* may assume that a batch will never cross a `resume_token` boundary.
*
*
* optional uint32 batch_checksum = 6;
*
* @param value The batchChecksum to set.
* @return This builder for chaining.
*/
public Builder setBatchChecksum(int value) {
batchChecksum_ = value;
bitField0_ |= 0x00000002;
onChanged();
return this;
}
/**
*
*
*
* CRC32C checksum of concatenated `partial_rows` data for the current batch.
*
* When present, the buffered data from `partial_rows` forms a complete
* parseable message of the appropriate type.
*
* The client should mark the end of a parseable message and prepare to
* receive a new one starting from the next `PartialResultSet` message.
* Clients must verify the checksum of the serialized batch before yielding it
* to the caller.
*
* This does NOT mean the values can be yielded to the callers since a
* `resume_token` is required to safely do so.
*
* If `resume_token` is non-empty and any data has been received since the
* last one, this field is guaranteed to be non-empty. In other words, clients
* may assume that a batch will never cross a `resume_token` boundary.
*
*
* optional uint32 batch_checksum = 6;
*
* @return This builder for chaining.
*/
public Builder clearBatchChecksum() {
bitField0_ = (bitField0_ & ~0x00000002);
batchChecksum_ = 0;
onChanged();
return this;
}
private com.google.protobuf.ByteString resumeToken_ = com.google.protobuf.ByteString.EMPTY;
/**
*
*
*
* An opaque token sent by the server to allow query resumption and signal
* that the buffered values constructed from received `partial_rows` can be
* yielded to the caller. Clients can provide this token in a subsequent
* request to resume the result stream from the current point.
*
* When `resume_token` is non-empty, the buffered values received from
* `partial_rows` since the last non-empty `resume_token` can be yielded to
* the callers, provided that the client keeps the value of `resume_token` and
* uses it on subsequent retries.
*
* A `resume_token` may be sent without information in `partial_rows` to
* checkpoint the progress of a sparse query. Any previous `partial_rows` data
* should still be yielded in this case, and the new `resume_token` should be
* saved for future retries as normal.
*
* A `resume_token` will only be sent on a boundary where there is either no
* ongoing result batch, or `batch_checksum` is also populated.
*
* The server will also send a sentinel `resume_token` when the last batch of
* `partial_rows` is sent. If the client retries the ExecuteQueryRequest with
* the sentinel `resume_token`, the server will emit it again without any
* data in `partial_rows`, then return OK.
*
*
* bytes resume_token = 5;
*
* @return The resumeToken.
*/
@java.lang.Override
public com.google.protobuf.ByteString getResumeToken() {
return resumeToken_;
}
/**
*
*
*
* An opaque token sent by the server to allow query resumption and signal
* that the buffered values constructed from received `partial_rows` can be
* yielded to the caller. Clients can provide this token in a subsequent
* request to resume the result stream from the current point.
*
* When `resume_token` is non-empty, the buffered values received from
* `partial_rows` since the last non-empty `resume_token` can be yielded to
* the callers, provided that the client keeps the value of `resume_token` and
* uses it on subsequent retries.
*
* A `resume_token` may be sent without information in `partial_rows` to
* checkpoint the progress of a sparse query. Any previous `partial_rows` data
* should still be yielded in this case, and the new `resume_token` should be
* saved for future retries as normal.
*
* A `resume_token` will only be sent on a boundary where there is either no
* ongoing result batch, or `batch_checksum` is also populated.
*
* The server will also send a sentinel `resume_token` when the last batch of
* `partial_rows` is sent. If the client retries the ExecuteQueryRequest with
* the sentinel `resume_token`, the server will emit it again without any
* data in `partial_rows`, then return OK.
*
*
* bytes resume_token = 5;
*
* @param value The resumeToken to set.
* @return This builder for chaining.
*/
public Builder setResumeToken(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
resumeToken_ = value;
bitField0_ |= 0x00000004;
onChanged();
return this;
}
/**
*
*
*
* An opaque token sent by the server to allow query resumption and signal
* that the buffered values constructed from received `partial_rows` can be
* yielded to the caller. Clients can provide this token in a subsequent
* request to resume the result stream from the current point.
*
* When `resume_token` is non-empty, the buffered values received from
* `partial_rows` since the last non-empty `resume_token` can be yielded to
* the callers, provided that the client keeps the value of `resume_token` and
* uses it on subsequent retries.
*
* A `resume_token` may be sent without information in `partial_rows` to
* checkpoint the progress of a sparse query. Any previous `partial_rows` data
* should still be yielded in this case, and the new `resume_token` should be
* saved for future retries as normal.
*
* A `resume_token` will only be sent on a boundary where there is either no
* ongoing result batch, or `batch_checksum` is also populated.
*
* The server will also send a sentinel `resume_token` when the last batch of
* `partial_rows` is sent. If the client retries the ExecuteQueryRequest with
* the sentinel `resume_token`, the server will emit it again without any
* data in `partial_rows`, then return OK.
*
*
* bytes resume_token = 5;
*
* @return This builder for chaining.
*/
public Builder clearResumeToken() {
bitField0_ = (bitField0_ & ~0x00000004);
resumeToken_ = getDefaultInstance().getResumeToken();
onChanged();
return this;
}
private boolean reset_;
/**
*
*
*
* If `true`, any data buffered since the last non-empty `resume_token` must
* be discarded before the other parts of this message, if any, are handled.
*
*
* bool reset = 7;
*
* @return The reset.
*/
@java.lang.Override
public boolean getReset() {
return reset_;
}
/**
*
*
*
* If `true`, any data buffered since the last non-empty `resume_token` must
* be discarded before the other parts of this message, if any, are handled.
*
*
* bool reset = 7;
*
* @param value The reset to set.
* @return This builder for chaining.
*/
public Builder setReset(boolean value) {
reset_ = value;
bitField0_ |= 0x00000008;
onChanged();
return this;
}
/**
*
*
*
* If `true`, any data buffered since the last non-empty `resume_token` must
* be discarded before the other parts of this message, if any, are handled.
*
*
* bool reset = 7;
*
* @return This builder for chaining.
*/
public Builder clearReset() {
bitField0_ = (bitField0_ & ~0x00000008);
reset_ = false;
onChanged();
return this;
}
private int estimatedBatchSize_;
/**
*
*
*
* Estimated size of the buffer required to hold the next batch of results.
*
* This value will be sent with the first `partial_rows` of a batch. That is,
* on the first `partial_rows` received in a stream, on the first message
* after a `batch_checksum` message, and any time `reset` is true.
*
* The client can use this estimate to allocate a buffer for the next batch of
* results. This helps minimize the number of allocations required, though the
* buffer size may still need to be increased if the estimate is too low.
*
*
* int32 estimated_batch_size = 4;
*
* @return The estimatedBatchSize.
*/
@java.lang.Override
public int getEstimatedBatchSize() {
return estimatedBatchSize_;
}
/**
*
*
*
* Estimated size of the buffer required to hold the next batch of results.
*
* This value will be sent with the first `partial_rows` of a batch. That is,
* on the first `partial_rows` received in a stream, on the first message
* after a `batch_checksum` message, and any time `reset` is true.
*
* The client can use this estimate to allocate a buffer for the next batch of
* results. This helps minimize the number of allocations required, though the
* buffer size may still need to be increased if the estimate is too low.
*
*
* int32 estimated_batch_size = 4;
*
* @param value The estimatedBatchSize to set.
* @return This builder for chaining.
*/
public Builder setEstimatedBatchSize(int value) {
estimatedBatchSize_ = value;
bitField0_ |= 0x00000010;
onChanged();
return this;
}
/**
*
*
*
* Estimated size of the buffer required to hold the next batch of results.
*
* This value will be sent with the first `partial_rows` of a batch. That is,
* on the first `partial_rows` received in a stream, on the first message
* after a `batch_checksum` message, and any time `reset` is true.
*
* The client can use this estimate to allocate a buffer for the next batch of
* results. This helps minimize the number of allocations required, though the
* buffer size may still need to be increased if the estimate is too low.
*
*
* int32 estimated_batch_size = 4;
*
* @return This builder for chaining.
*/
public Builder clearEstimatedBatchSize() {
bitField0_ = (bitField0_ & ~0x00000010);
estimatedBatchSize_ = 0;
onChanged();
return this;
}
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
}
@java.lang.Override
public final Builder mergeUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.mergeUnknownFields(unknownFields);
}
// @@protoc_insertion_point(builder_scope:google.bigtable.v2.PartialResultSet)
}
// @@protoc_insertion_point(class_scope:google.bigtable.v2.PartialResultSet)
private static final com.google.bigtable.v2.PartialResultSet DEFAULT_INSTANCE;
static {
DEFAULT_INSTANCE = new com.google.bigtable.v2.PartialResultSet();
}
public static com.google.bigtable.v2.PartialResultSet getDefaultInstance() {
return DEFAULT_INSTANCE;
}
private static final com.google.protobuf.Parser<PartialResultSet> PARSER =
new com.google.protobuf.AbstractParser<PartialResultSet>() {
@java.lang.Override
public PartialResultSet parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
Builder builder = newBuilder();
try {
builder.mergeFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(builder.buildPartial());
} catch (com.google.protobuf.UninitializedMessageException e) {
throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(e)
.setUnfinishedMessage(builder.buildPartial());
}
return builder.buildPartial();
}
};
public static com.google.protobuf.Parser<PartialResultSet> parser() {
return PARSER;
}
@java.lang.Override
public com.google.protobuf.Parser<PartialResultSet> getParserForType() {
return PARSER;
}
@java.lang.Override
public com.google.bigtable.v2.PartialResultSet getDefaultInstanceForType() {
return DEFAULT_INSTANCE;
}
}