// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: Client.proto
package org.apache.hadoop.hbase.protobuf.generated;
@javax.annotation.Generated("proto") public final class ClientProtos {
private ClientProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
 * Protobuf enum {@code hbase.pb.Consistency}
 *
 * Consistency defines the expected consistency level for an operation.
 */
public enum Consistency
implements com.google.protobuf.ProtocolMessageEnum {
/**
* STRONG = 0;
*/
STRONG(0, 0),
/**
* TIMELINE = 1;
*/
TIMELINE(1, 1),
;
/**
* STRONG = 0;
*/
public static final int STRONG_VALUE = 0;
/**
* TIMELINE = 1;
*/
public static final int TIMELINE_VALUE = 1;
public final int getNumber() { return value; }
public static Consistency valueOf(int value) {
switch (value) {
case 0: return STRONG;
case 1: return TIMELINE;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Consistency>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Consistency>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Consistency>() {
public Consistency findValueByNumber(int number) {
return Consistency.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor().getEnumTypes().get(0);
}
private static final Consistency[] VALUES = values();
public static Consistency valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Consistency(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.Consistency)
}
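// Usage sketch (illustrative, not part of the generated output): a client
// picks a Consistency when it can tolerate possibly-stale reads served by
// secondary region replicas. Note the two lookup behaviors:
//
//   Consistency c = Consistency.valueOf(1);           // TIMELINE
//   Consistency unknown = Consistency.valueOf(99);    // null, not an exception
//   int wireValue = Consistency.TIMELINE.getNumber(); // 1, as written on the wire
//
// Callers decoding values that may come from a newer schema should null-check
// the int-based valueOf.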
public interface AuthorizationsOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated string label = 1;
/**
* repeated string label = 1;
*/
java.util.List<java.lang.String>
getLabelList();
/**
* repeated string label = 1;
*/
int getLabelCount();
/**
* repeated string label = 1;
*/
java.lang.String getLabel(int index);
/**
* repeated string label = 1;
*/
com.google.protobuf.ByteString
getLabelBytes(int index);
}
/**
 * Protobuf type {@code hbase.pb.Authorizations}
 *
 * The protocol buffer version of Authorizations.
 */
@javax.annotation.Generated("proto") public static final class Authorizations extends
com.google.protobuf.GeneratedMessage
implements AuthorizationsOrBuilder {
// Use Authorizations.newBuilder() to construct.
private Authorizations(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Authorizations(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Authorizations defaultInstance;
public static Authorizations getDefaultInstance() {
return defaultInstance;
}
public Authorizations getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Authorizations(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
label_ = new com.google.protobuf.LazyStringArrayList();
mutable_bitField0_ |= 0x00000001;
}
label_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
label_ = new com.google.protobuf.UnmodifiableLazyStringList(label_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
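// Note on the tag values in the parse loop above (standard protobuf wire
// format): a tag is (field_number << 3) | wire_type, so tag 10 is field 1
// ("label") with wire type 2 (length-delimited), and tag 0 marks end of
// input. Unrecognized tags fall through to parseUnknownField, which keeps
// them in unknownFields instead of failing the parse.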
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Authorizations_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Authorizations_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations.Builder.class);
}
public static com.google.protobuf.Parser<Authorizations> PARSER =
new com.google.protobuf.AbstractParser<Authorizations>() {
public Authorizations parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Authorizations(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Authorizations> getParserForType() {
return PARSER;
}
// repeated string label = 1;
public static final int LABEL_FIELD_NUMBER = 1;
private com.google.protobuf.LazyStringList label_;
/**
* repeated string label = 1;
*/
public java.util.List<java.lang.String>
getLabelList() {
return label_;
}
/**
* repeated string label = 1;
*/
public int getLabelCount() {
return label_.size();
}
/**
* repeated string label = 1;
*/
public java.lang.String getLabel(int index) {
return label_.get(index);
}
/**
* repeated string label = 1;
*/
public com.google.protobuf.ByteString
getLabelBytes(int index) {
return label_.getByteString(index);
}
private void initFields() {
label_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < label_.size(); i++) {
output.writeBytes(1, label_.getByteString(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < label_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(label_.getByteString(i));
}
size += dataSize;
size += 1 * getLabelList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations) obj;
boolean result = true;
result = result && getLabelList()
.equals(other.getLabelList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getLabelCount() > 0) {
hash = (37 * hash) + LABEL_FIELD_NUMBER;
hash = (53 * hash) + getLabelList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Authorizations prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
static {
defaultInstance = new Authorizations(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.Authorizations)
}
public interface ResultOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated .hbase.pb.Cell cell = 1;
/**
 * repeated .hbase.pb.Cell cell = 1;
 *
 * Result includes the Cells or else it just has a count of Cells
 * that are carried otherwise.
 */
java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>
getCellList();
/**
 * repeated .hbase.pb.Cell cell = 1;
 */
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index);
/**
 * repeated .hbase.pb.Cell cell = 1;
 */
int getCellCount();
// optional int32 associated_cell_count = 2;
/**
 * optional int32 associated_cell_count = 2;
 *
 * The below count is set when the associated cells are
 * not part of this protobuf message; they are passed alongside
 * and then this Message is just a placeholder with metadata.
 * The count is needed to know how many to peel off the block of Cells as
 * ours. NOTE: This is different from the pb managed cell_count of the
 * 'cell' field above which is non-null when the cells are pb'd.
 */
boolean hasAssociatedCellCount();
int getAssociatedCellCount();
// optional bool exists = 3;
/**
 * optional bool exists = 3;
 *
 * used for Get to check existence only. Not set if existence_only was not set to true
 * in the query.
 */
boolean hasExists();
boolean getExists();
// optional bool stale = 4;
/**
 * optional bool stale = 4;
 *
 * Whether or not the results are coming from possibly stale data
 */
boolean hasStale();
boolean getStale();
// optional bool partial = 5 [default = false];
/**
 * optional bool partial = 5 [default = false];
 *
 * Whether or not the entire result could be returned. Results will be split when
 * the RPC chunk size limit is reached. Partial results contain only a subset of the
 * cells for a row and must be combined with a result containing the remaining cells
 * to form a complete result. The equivalent flag in o.a.h.h.client.Result is
 * mayHaveMoreCellsInRow.
 */
boolean hasPartial();
boolean getPartial();
}
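// A hedged round-trip sketch (not part of the generated file; addLabel is the
// standard generated adder for 'repeated string label = 1', whose Builder body
// is not shown above):
//
//   ClientProtos.Authorizations auths = ClientProtos.Authorizations.newBuilder()
//       .addLabel("secret")
//       .addLabel("topsecret")
//       .build();
//   byte[] bytes = auths.toByteArray();
//   ClientProtos.Authorizations roundTrip =
//       ClientProtos.Authorizations.parseFrom(bytes);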
/**
* Protobuf type {@code hbase.pb.Result}
*/
@javax.annotation.Generated("proto") public static final class Result extends
com.google.protobuf.GeneratedMessage
implements ResultOrBuilder {
// Use Result.newBuilder() to construct.
private Result(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Result(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Result defaultInstance;
public static Result getDefaultInstance() {
return defaultInstance;
}
public Result getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Result(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
cell_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>();
mutable_bitField0_ |= 0x00000001;
}
cell_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.PARSER, extensionRegistry));
break;
}
case 16: {
bitField0_ |= 0x00000001;
associatedCellCount_ = input.readInt32();
break;
}
case 24: {
bitField0_ |= 0x00000002;
exists_ = input.readBool();
break;
}
case 32: {
bitField0_ |= 0x00000004;
stale_ = input.readBool();
break;
}
case 40: {
bitField0_ |= 0x00000008;
partial_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
cell_ = java.util.Collections.unmodifiableList(cell_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
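// Tag map for the cases above: 10 = field 1 'cell' (length-delimited message),
// 16 = field 2 'associated_cell_count' (varint int32), 24 = field 3 'exists',
// 32 = field 4 'stale', and 40 = field 5 'partial' (varint bools).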
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Result_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Result_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder.class);
}
public static com.google.protobuf.Parser<Result> PARSER =
new com.google.protobuf.AbstractParser<Result>() {
public Result parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Result(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Result> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated .hbase.pb.Cell cell = 1;
public static final int CELL_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> cell_;
/**
 * repeated .hbase.pb.Cell cell = 1;
 *
 * Result includes the Cells or else it just has a count of Cells
 * that are carried otherwise.
 */
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> getCellList() {
return cell_;
}
/**
 * repeated .hbase.pb.Cell cell = 1;
 */
public int getCellCount() {
return cell_.size();
}
/**
 * repeated .hbase.pb.Cell cell = 1;
 */
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell getCell(int index) {
return cell_.get(index);
}
/**
 * repeated .hbase.pb.Cell cell = 1;
 */
public org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder getCellOrBuilder(
int index) {
return cell_.get(index);
}
// optional int32 associated_cell_count = 2;
public static final int ASSOCIATED_CELL_COUNT_FIELD_NUMBER = 2;
private int associatedCellCount_;
/**
 * optional int32 associated_cell_count = 2;
 *
 * The below count is set when the associated cells are
 * not part of this protobuf message; they are passed alongside
 * and then this Message is just a placeholder with metadata.
 * The count is needed to know how many to peel off the block of Cells as
 * ours. NOTE: This is different from the pb managed cell_count of the
 * 'cell' field above which is non-null when the cells are pb'd.
 */
public boolean hasAssociatedCellCount() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public int getAssociatedCellCount() {
return associatedCellCount_;
}
// optional bool exists = 3;
public static final int EXISTS_FIELD_NUMBER = 3;
private boolean exists_;
/**
 * optional bool exists = 3;
 *
 * used for Get to check existence only. Not set if existence_only was not set to true
 * in the query.
 */
public boolean hasExists() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
public boolean getExists() {
return exists_;
}
// optional bool stale = 4;
public static final int STALE_FIELD_NUMBER = 4;
private boolean stale_;
/**
 * optional bool stale = 4;
 *
 * Whether or not the results are coming from possibly stale data
 */
public boolean hasStale() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
public boolean getStale() {
return stale_;
}
// optional bool partial = 5 [default = false];
public static final int PARTIAL_FIELD_NUMBER = 5;
private boolean partial_;
/**
 * optional bool partial = 5 [default = false];
 *
 * Whether or not the entire result could be returned. Results will be split when
 * the RPC chunk size limit is reached. Partial results contain only a subset of the
 * cells for a row and must be combined with a result containing the remaining cells
 * to form a complete result. The equivalent flag in o.a.h.h.client.Result is
 * mayHaveMoreCellsInRow.
 */
public boolean hasPartial() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
public boolean getPartial() {
return partial_;
}
private void initFields() {
cell_ = java.util.Collections.emptyList();
associatedCellCount_ = 0;
exists_ = false;
stale_ = false;
partial_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < cell_.size(); i++) {
output.writeMessage(1, cell_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt32(2, associatedCellCount_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(3, exists_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(4, stale_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(5, partial_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < cell_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, cell_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, associatedCellCount_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, exists_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(4, stale_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, partial_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result) obj;
boolean result = true;
result = result && getCellList()
.equals(other.getCellList());
result = result && (hasAssociatedCellCount() == other.hasAssociatedCellCount());
if (hasAssociatedCellCount()) {
result = result && (getAssociatedCellCount()
== other.getAssociatedCellCount());
}
result = result && (hasExists() == other.hasExists());
if (hasExists()) {
result = result && (getExists()
== other.getExists());
}
result = result && (hasStale() == other.hasStale());
if (hasStale()) {
result = result && (getStale()
== other.getStale());
}
result = result && (hasPartial() == other.hasPartial());
if (hasPartial()) {
result = result && (getPartial()
== other.getPartial());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getCellCount() > 0) {
hash = (37 * hash) + CELL_FIELD_NUMBER;
hash = (53 * hash) + getCellList().hashCode();
}
if (hasAssociatedCellCount()) {
hash = (37 * hash) + ASSOCIATED_CELL_COUNT_FIELD_NUMBER;
hash = (53 * hash) + getAssociatedCellCount();
}
if (hasExists()) {
hash = (37 * hash) + EXISTS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getExists());
}
if (hasStale()) {
hash = (37 * hash) + STALE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getStale());
}
if (hasPartial()) {
hash = (37 * hash) + PARTIAL_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getPartial());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.Result}
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Result_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Result_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getCellFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (cellBuilder_ == null) {
cell_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
} else {
cellBuilder_.clear();
}
associatedCellCount_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
exists_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
stale_ = false;
bitField0_ = (bitField0_ & ~0x00000008);
partial_ = false;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Result_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result build() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (cellBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
cell_ = java.util.Collections.unmodifiableList(cell_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.cell_ = cell_;
} else {
result.cell_ = cellBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.associatedCellCount_ = associatedCellCount_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000002;
}
result.exists_ = exists_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000004;
}
result.stale_ = stale_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000008;
}
result.partial_ = partial_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance()) return this;
if (cellBuilder_ == null) {
if (!other.cell_.isEmpty()) {
if (cell_.isEmpty()) {
cell_ = other.cell_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureCellIsMutable();
cell_.addAll(other.cell_);
}
onChanged();
}
} else {
if (!other.cell_.isEmpty()) {
if (cellBuilder_.isEmpty()) {
cellBuilder_.dispose();
cellBuilder_ = null;
cell_ = other.cell_;
bitField0_ = (bitField0_ & ~0x00000001);
cellBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getCellFieldBuilder() : null;
} else {
cellBuilder_.addAllMessages(other.cell_);
}
}
}
if (other.hasAssociatedCellCount()) {
setAssociatedCellCount(other.getAssociatedCellCount());
}
if (other.hasExists()) {
setExists(other.getExists());
}
if (other.hasStale()) {
setStale(other.getStale());
}
if (other.hasPartial()) {
setPartial(other.getPartial());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated .hbase.pb.Cell cell = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell> cell_ =
java.util.Collections.emptyList();
private void ensureCellIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
cell_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell>(cell_);
bitField0_ |= 0x00000001;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell, org.apache.hadoop.hbase.protobuf.generated.CellProtos.Cell.Builder, org.apache.hadoop.hbase.protobuf.generated.CellProtos.CellOrBuilder> cellBuilder_;
// optional int32 associated_cell_count = 2;
private int associatedCellCount_;
/**
 * optional int32 associated_cell_count = 2;
 *
 * The below count is set when the associated cells are
 * not part of this protobuf message; they are passed alongside
 * and then this Message is just a placeholder with metadata.
 * The count is needed to know how many to peel off the block of Cells as
 * ours. NOTE: This is different from the pb managed cell_count of the
 * 'cell' field above which is non-null when the cells are pb'd.
 */
public int getAssociatedCellCount() {
return associatedCellCount_;
}
// optional bool exists = 3;
private boolean exists_;
// optional bool stale = 4;
private boolean stale_;
// optional bool partial = 5 [default = false];
private boolean partial_;
/**
 * optional bool partial = 5 [default = false];
 *
 * Whether or not the entire result could be returned. Results will be split when
 * the RPC chunk size limit is reached. Partial results contain only a subset of the
 * cells for a row and must be combined with a result containing the remaining cells
 * to form a complete result. The equivalent flag in o.a.h.h.client.Result is
 * mayHaveMoreCellsInRow.
 */
public Builder clearPartial() {
bitField0_ = (bitField0_ & ~0x00000010);
partial_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.Result)
}
static {
defaultInstance = new Result(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.Result)
}
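// Client-side sketch (an assumption about typical use, not from this file):
// when a row is split across RPC chunks, each piece arrives as a Result with
// getPartial() == true, so a caller stitching raw protos together buffers
// cells until a non-partial Result completes the row.
//
//   java.util.List<CellProtos.Cell> rowCells = new java.util.ArrayList<>();
//   ClientProtos.Result r = ClientProtos.Result.parseFrom(bytes);
//   rowCells.addAll(r.getCellList());
//   boolean rowComplete = !r.getPartial(); // flush rowCells once complete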
public interface GetRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.RegionSpecifier region = 1;
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
boolean hasRegion();
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
// required .hbase.pb.Get get = 2;
/**
* required .hbase.pb.Get get = 2;
*/
boolean hasGet();
/**
* required .hbase.pb.Get get = 2;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get getGet();
/**
* required .hbase.pb.Get get = 2;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetOrBuilder getGetOrBuilder();
}
/**
 * Protobuf type {@code hbase.pb.GetRequest}
 *
 * The get request. Perform a single Get operation.
 */
@javax.annotation.Generated("proto") public static final class GetRequest extends
com.google.protobuf.GeneratedMessage
implements GetRequestOrBuilder {
// Use GetRequest.newBuilder() to construct.
private GetRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetRequest defaultInstance;
public static GetRequest getDefaultInstance() {
return defaultInstance;
}
public GetRequest getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = region_.toBuilder();
}
region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(region_);
region_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = get_.toBuilder();
}
get_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(get_);
get_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.Builder.class);
}
public static com.google.protobuf.Parser<GetRequest> PARSER =
new com.google.protobuf.AbstractParser<GetRequest>() {
public GetRequest parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetRequest(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetRequest> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hbase.pb.RegionSpecifier region = 1;
public static final int REGION_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_;
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public boolean hasRegion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
return region_;
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
return region_;
}
// required .hbase.pb.Get get = 2;
public static final int GET_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get get_;
/**
* required .hbase.pb.Get get = 2;
*/
public boolean hasGet() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hbase.pb.Get get = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get getGet() {
return get_;
}
/**
* required .hbase.pb.Get get = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetOrBuilder getGetOrBuilder() {
return get_;
}
private void initFields() {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
get_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasGet()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegion().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getGet().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, region_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, get_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, region_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, get_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest) obj;
boolean result = true;
result = result && (hasRegion() == other.hasRegion());
if (hasRegion()) {
result = result && getRegion()
.equals(other.getRegion());
}
result = result && (hasGet() == other.hasGet());
if (hasGet()) {
result = result && getGet()
.equals(other.getGet());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegion()) {
hash = (37 * hash) + REGION_FIELD_NUMBER;
hash = (53 * hash) + getRegion().hashCode();
}
if (hasGet()) {
hash = (37 * hash) + GET_FIELD_NUMBER;
hash = (53 * hash) + getGet().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hbase.pb.GetRequest}
 *
 * The get request. Perform a single Get operation.
 */
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequestOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegionFieldBuilder();
getGetFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (regionBuilder_ == null) {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
} else {
regionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (getBuilder_ == null) {
get_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.getDefaultInstance();
} else {
getBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetRequest_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest build() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (regionBuilder_ == null) {
result.region_ = region_;
} else {
result.region_ = regionBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (getBuilder_ == null) {
result.get_ = get_;
} else {
result.get_ = getBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest.getDefaultInstance()) return this;
if (other.hasRegion()) {
mergeRegion(other.getRegion());
}
if (other.hasGet()) {
mergeGet(other.getGet());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegion()) {
return false;
}
if (!hasGet()) {
return false;
}
if (!getRegion().isInitialized()) {
return false;
}
if (!getGet().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.RegionSpecifier region = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_;
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public boolean hasRegion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
if (regionBuilder_ == null) {
return region_;
} else {
return regionBuilder_.getMessage();
}
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
if (regionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
region_ = value;
onChanged();
} else {
regionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public Builder setRegion(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
if (regionBuilder_ == null) {
region_ = builderForValue.build();
onChanged();
} else {
regionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
if (regionBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
region_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial();
} else {
region_ = value;
}
onChanged();
} else {
regionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public Builder clearRegion() {
if (regionBuilder_ == null) {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
onChanged();
} else {
regionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegionFieldBuilder().getBuilder();
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
if (regionBuilder_ != null) {
return regionBuilder_.getMessageOrBuilder();
} else {
return region_;
}
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
getRegionFieldBuilder() {
if (regionBuilder_ == null) {
regionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
region_,
getParentForChildren(),
isClean());
region_ = null;
}
return regionBuilder_;
}
// required .hbase.pb.Get get = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get get_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetOrBuilder> getBuilder_;
/**
* required .hbase.pb.Get get = 2;
*/
public boolean hasGet() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hbase.pb.Get get = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get getGet() {
if (getBuilder_ == null) {
return get_;
} else {
return getBuilder_.getMessage();
}
}
/**
* required .hbase.pb.Get get = 2;
*/
public Builder setGet(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get value) {
if (getBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
get_ = value;
onChanged();
} else {
getBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hbase.pb.Get get = 2;
*/
public Builder setGet(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder builderForValue) {
if (getBuilder_ == null) {
get_ = builderForValue.build();
onChanged();
} else {
getBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hbase.pb.Get get = 2;
*/
public Builder mergeGet(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get value) {
if (getBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
get_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.getDefaultInstance()) {
get_ =
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.newBuilder(get_).mergeFrom(value).buildPartial();
} else {
get_ = value;
}
onChanged();
} else {
getBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hbase.pb.Get get = 2;
*/
public Builder clearGet() {
if (getBuilder_ == null) {
get_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.getDefaultInstance();
onChanged();
} else {
getBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hbase.pb.Get get = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder getGetBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getGetFieldBuilder().getBuilder();
}
/**
* required .hbase.pb.Get get = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetOrBuilder getGetOrBuilder() {
if (getBuilder_ != null) {
return getBuilder_.getMessageOrBuilder();
} else {
return get_;
}
}
/**
* required .hbase.pb.Get get = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetOrBuilder>
getGetFieldBuilder() {
if (getBuilder_ == null) {
getBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Get.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetOrBuilder>(
get_,
getParentForChildren(),
isClean());
get_ = null;
}
return getBuilder_;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.GetRequest)
}
static {
defaultInstance = new GetRequest(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.GetRequest)
}
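// Illustrative sketch (not part of the generated source): the Builder API above
// is typically driven by HBase's RPC layer roughly as follows; the region name
// and row bytes are hypothetical placeholders.
//
//   HBaseProtos.RegionSpecifier region = HBaseProtos.RegionSpecifier.newBuilder()
//       .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
//       .setValue(com.google.protobuf.ByteString.copyFromUtf8("someRegionName"))
//       .build();
//   ClientProtos.GetRequest request = ClientProtos.GetRequest.newBuilder()
//       .setRegion(region)
//       .setGet(ClientProtos.Get.newBuilder()
//           .setRow(com.google.protobuf.ByteString.copyFromUtf8("row1")))
//       .build();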
public interface GetResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .hbase.pb.Result result = 1;
/**
* optional .hbase.pb.Result result = 1;
*/
boolean hasResult();
/**
* optional .hbase.pb.Result result = 1;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResult();
/**
* optional .hbase.pb.Result result = 1;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.GetResponse}
*/
@javax.annotation.Generated("proto") public static final class GetResponse extends
com.google.protobuf.GeneratedMessage
implements GetResponseOrBuilder {
// Use GetResponse.newBuilder() to construct.
private GetResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetResponse defaultInstance;
public static GetResponse getDefaultInstance() {
return defaultInstance;
}
public GetResponse getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
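// The constructor below parses a GetResponse straight off the wire: it reads
// tag/value pairs until tag 0 (end of input), stores field 1 (tag 10 = field
// number 1, wire type 2) into result_, merging with any previously parsed
// value, and funnels unrecognized fields into unknownFields.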
private GetResponse(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = result_.toBuilder();
}
result_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(result_);
result_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.Builder.class);
}
public static com.google.protobuf.Parser<GetResponse> PARSER =
new com.google.protobuf.AbstractParser<GetResponse>() {
public GetResponse parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetResponse(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetResponse> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional .hbase.pb.Result result = 1;
public static final int RESULT_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result result_;
/**
* optional .hbase.pb.Result result = 1;
*/
public boolean hasResult() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hbase.pb.Result result = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResult() {
return result_;
}
/**
* optional .hbase.pb.Result result = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultOrBuilder() {
return result_;
}
private void initFields() {
result_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, result_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, result_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse) obj;
boolean result = true;
result = result && (hasResult() == other.hasResult());
if (hasResult()) {
result = result && getResult()
.equals(other.getResult());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasResult()) {
hash = (37 * hash) + RESULT_FIELD_NUMBER;
hash = (53 * hash) + getResult().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.GetResponse}
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponseOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getResultFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (resultBuilder_ == null) {
result_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance();
} else {
resultBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_GetResponse_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse build() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (resultBuilder_ == null) {
result.result_ = result_;
} else {
result.result_ = resultBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse.getDefaultInstance()) return this;
if (other.hasResult()) {
mergeResult(other.getResult());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional .hbase.pb.Result result = 1;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result result_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder> resultBuilder_;
/**
* optional .hbase.pb.Result result = 1;
*/
public boolean hasResult() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hbase.pb.Result result = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResult() {
if (resultBuilder_ == null) {
return result_;
} else {
return resultBuilder_.getMessage();
}
}
/**
* optional .hbase.pb.Result result = 1;
*/
public Builder setResult(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) {
if (resultBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
result_ = value;
onChanged();
} else {
resultBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hbase.pb.Result result = 1;
*/
public Builder setResult(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) {
if (resultBuilder_ == null) {
result_ = builderForValue.build();
onChanged();
} else {
resultBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hbase.pb.Result result = 1;
*/
public Builder mergeResult(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) {
if (resultBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
result_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance()) {
result_ =
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.newBuilder(result_).mergeFrom(value).buildPartial();
} else {
result_ = value;
}
onChanged();
} else {
resultBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* optional .hbase.pb.Result result = 1;
*/
public Builder clearResult() {
if (resultBuilder_ == null) {
result_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance();
onChanged();
} else {
resultBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* optional .hbase.pb.Result result = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder getResultBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getResultFieldBuilder().getBuilder();
}
/**
* optional .hbase.pb.Result result = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultOrBuilder() {
if (resultBuilder_ != null) {
return resultBuilder_.getMessageOrBuilder();
} else {
return result_;
}
}
/**
* optional .hbase.pb.Result result = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder>
getResultFieldBuilder() {
if (resultBuilder_ == null) {
resultBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder>(
result_,
getParentForChildren(),
isClean());
result_ = null;
}
return resultBuilder_;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.GetResponse)
}
static {
defaultInstance = new GetResponse(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.GetResponse)
}
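// Illustrative sketch (not part of the generated source): GetResponse, like
// every message in this file, round-trips through its PARSER / writeTo pair,
// e.g.:
//
//   byte[] wire = response.toByteArray();
//   ClientProtos.GetResponse copy = ClientProtos.GetResponse.parseFrom(wire);
//   if (copy.hasResult()) {
//     ClientProtos.Result result = copy.getResult();
//   }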
public interface ConditionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes row = 1;
/**
* required bytes row = 1;
*/
boolean hasRow();
/**
* required bytes row = 1;
*/
com.google.protobuf.ByteString getRow();
// optional bytes family = 2;
/**
* optional bytes family = 2;
*/
boolean hasFamily();
/**
* optional bytes family = 2;
*/
com.google.protobuf.ByteString getFamily();
// optional bytes qualifier = 3;
/**
* optional bytes qualifier = 3;
*/
boolean hasQualifier();
/**
* optional bytes qualifier = 3;
*/
com.google.protobuf.ByteString getQualifier();
// optional .hbase.pb.CompareType compare_type = 4;
/**
* optional .hbase.pb.CompareType compare_type = 4;
*/
boolean hasCompareType();
/**
* optional .hbase.pb.CompareType compare_type = 4;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType getCompareType();
// optional .hbase.pb.Comparator comparator = 5;
/**
* optional .hbase.pb.Comparator comparator = 5;
*/
boolean hasComparator();
/**
* optional .hbase.pb.Comparator comparator = 5;
*/
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator getComparator();
/**
* optional .hbase.pb.Comparator comparator = 5;
*/
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.ComparatorOrBuilder getComparatorOrBuilder();
// optional .hbase.pb.TimeRange time_range = 6;
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
boolean hasTimeRange();
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange();
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder();
// optional .hbase.pb.Filter filter = 7;
/**
* optional .hbase.pb.Filter filter = 7;
*/
boolean hasFilter();
/**
* optional .hbase.pb.Filter filter = 7;
*/
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter();
/**
* optional .hbase.pb.Filter filter = 7;
*/
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder();
}
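// Illustrative sketch (not part of the generated source): a Condition for the
// check-and-mutate RPCs, built through the generated Builder; the row, family
// and qualifier bytes are hypothetical placeholders.
//
//   ClientProtos.Condition condition = ClientProtos.Condition.newBuilder()
//       .setRow(com.google.protobuf.ByteString.copyFromUtf8("row1"))
//       .setFamily(com.google.protobuf.ByteString.copyFromUtf8("cf"))
//       .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("q"))
//       .setCompareType(HBaseProtos.CompareType.EQUAL)
//       .build();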
/**
* Protobuf type {@code hbase.pb.Condition}
*
*
**
* Condition to check if the value of a given cell (row,
* family, qualifier) matches a value via a given comparator.
*
* Condition is used in check and mutate operations.
*
*/
@javax.annotation.Generated("proto") public static final class Condition extends
com.google.protobuf.GeneratedMessage
implements ConditionOrBuilder {
// Use Condition.newBuilder() to construct.
private Condition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Condition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Condition defaultInstance;
public static Condition getDefaultInstance() {
return defaultInstance;
}
public Condition getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Condition(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
row_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
family_ = input.readBytes();
break;
}
case 26: {
bitField0_ |= 0x00000004;
qualifier_ = input.readBytes();
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
compareType_ = value;
}
break;
}
case 42: {
org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = comparator_.toBuilder();
}
comparator_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(comparator_);
comparator_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
case 50: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder subBuilder = null;
if (((bitField0_ & 0x00000020) == 0x00000020)) {
subBuilder = timeRange_.toBuilder();
}
timeRange_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(timeRange_);
timeRange_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000020;
break;
}
case 58: {
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = filter_.toBuilder();
}
filter_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(filter_);
filter_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Condition_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Condition_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder.class);
}
public static com.google.protobuf.Parser<Condition> PARSER =
new com.google.protobuf.AbstractParser<Condition>() {
public Condition parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Condition(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Condition> getParserForType() {
return PARSER;
}
private int bitField0_;
// required bytes row = 1;
public static final int ROW_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString row_;
/**
* required bytes row = 1;
*/
public boolean hasRow() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes row = 1;
*/
public com.google.protobuf.ByteString getRow() {
return row_;
}
// optional bytes family = 2;
public static final int FAMILY_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString family_;
/**
* optional bytes family = 2;
*/
public boolean hasFamily() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes family = 2;
*/
public com.google.protobuf.ByteString getFamily() {
return family_;
}
// optional bytes qualifier = 3;
public static final int QUALIFIER_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString qualifier_;
/**
* optional bytes qualifier = 3;
*/
public boolean hasQualifier() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional bytes qualifier = 3;
*/
public com.google.protobuf.ByteString getQualifier() {
return qualifier_;
}
// optional .hbase.pb.CompareType compare_type = 4;
public static final int COMPARE_TYPE_FIELD_NUMBER = 4;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType compareType_;
/**
* optional .hbase.pb.CompareType compare_type = 4;
*/
public boolean hasCompareType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hbase.pb.CompareType compare_type = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType getCompareType() {
return compareType_;
}
// optional .hbase.pb.Comparator comparator = 5;
public static final int COMPARATOR_FIELD_NUMBER = 5;
private org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator comparator_;
/**
* optional .hbase.pb.Comparator comparator = 5;
*/
public boolean hasComparator() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hbase.pb.Comparator comparator = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator getComparator() {
return comparator_;
}
/**
* optional .hbase.pb.Comparator comparator = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.ComparatorOrBuilder getComparatorOrBuilder() {
return comparator_;
}
// optional .hbase.pb.TimeRange time_range = 6;
public static final int TIME_RANGE_FIELD_NUMBER = 6;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_;
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
public boolean hasTimeRange() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() {
return timeRange_;
}
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() {
return timeRange_;
}
// optional .hbase.pb.Filter filter = 7;
public static final int FILTER_FIELD_NUMBER = 7;
private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_;
/**
* optional .hbase.pb.Filter filter = 7;
*/
public boolean hasFilter() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional .hbase.pb.Filter filter = 7;
*/
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
return filter_;
}
/**
* optional .hbase.pb.Filter filter = 7;
*/
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
return filter_;
}
private void initFields() {
row_ = com.google.protobuf.ByteString.EMPTY;
family_ = com.google.protobuf.ByteString.EMPTY;
qualifier_ = com.google.protobuf.ByteString.EMPTY;
compareType_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType.LESS;
comparator_ = org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.Comparator.getDefaultInstance();
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRow()) {
memoizedIsInitialized = 0;
return false;
}
if (hasComparator()) {
if (!getComparator().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFilter()) {
if (!getFilter().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, row_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, family_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, qualifier_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, compareType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(5, comparator_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeMessage(6, timeRange_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(7, filter_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, row_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, family_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, qualifier_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, compareType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, comparator_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, timeRange_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, filter_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition) obj;
boolean result = true;
result = result && (hasRow() == other.hasRow());
if (hasRow()) {
result = result && getRow()
.equals(other.getRow());
}
result = result && (hasFamily() == other.hasFamily());
if (hasFamily()) {
result = result && getFamily()
.equals(other.getFamily());
}
result = result && (hasQualifier() == other.hasQualifier());
if (hasQualifier()) {
result = result && getQualifier()
.equals(other.getQualifier());
}
result = result && (hasCompareType() == other.hasCompareType());
if (hasCompareType()) {
result = result &&
(getCompareType() == other.getCompareType());
}
result = result && (hasComparator() == other.hasComparator());
if (hasComparator()) {
result = result && getComparator()
.equals(other.getComparator());
}
result = result && (hasTimeRange() == other.hasTimeRange());
if (hasTimeRange()) {
result = result && getTimeRange()
.equals(other.getTimeRange());
}
result = result && (hasFilter() == other.hasFilter());
if (hasFilter()) {
result = result && getFilter()
.equals(other.getFilter());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRow()) {
hash = (37 * hash) + ROW_FIELD_NUMBER;
hash = (53 * hash) + getRow().hashCode();
}
if (hasFamily()) {
hash = (37 * hash) + FAMILY_FIELD_NUMBER;
hash = (53 * hash) + getFamily().hashCode();
}
if (hasQualifier()) {
hash = (37 * hash) + QUALIFIER_FIELD_NUMBER;
hash = (53 * hash) + getQualifier().hashCode();
}
if (hasCompareType()) {
hash = (37 * hash) + COMPARE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getCompareType());
}
if (hasComparator()) {
hash = (37 * hash) + COMPARATOR_FIELD_NUMBER;
hash = (53 * hash) + getComparator().hashCode();
}
if (hasTimeRange()) {
hash = (37 * hash) + TIME_RANGE_FIELD_NUMBER;
hash = (53 * hash) + getTimeRange().hashCode();
}
if (hasFilter()) {
hash = (37 * hash) + FILTER_FIELD_NUMBER;
hash = (53 * hash) + getFilter().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
static {
defaultInstance = new Condition(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.Condition)
}
/**
* Protobuf type {@code hbase.pb.MutationProto}
*
*
**
* A specific mutation inside a mutate request.
* It can be an append, increment, put or delete based
* on the mutation type. It can be fully filled in or
* only metadata present because data is being carried
* elsewhere outside of pb.
*
*/
@javax.annotation.Generated("proto") public static final class MutationProto extends
com.google.protobuf.GeneratedMessage
implements MutationProtoOrBuilder {
// Use MutationProto.newBuilder() to construct.
private MutationProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private MutationProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final MutationProto defaultInstance;
public static MutationProto getDefaultInstance() {
return defaultInstance;
}
public MutationProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MutationProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
row_ = input.readBytes();
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
mutateType_ = value;
}
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
columnValue_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue>();
mutable_bitField0_ |= 0x00000004;
}
columnValue_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.PARSER, extensionRegistry));
break;
}
case 32: {
bitField0_ |= 0x00000004;
timestamp_ = input.readUInt64();
break;
}
case 42: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
attribute_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair>();
mutable_bitField0_ |= 0x00000010;
}
attribute_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.PARSER, extensionRegistry));
break;
}
case 48: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(6, rawValue);
} else {
bitField0_ |= 0x00000008;
durability_ = value;
}
break;
}
case 58: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder subBuilder = null;
if (((bitField0_ & 0x00000010) == 0x00000010)) {
subBuilder = timeRange_.toBuilder();
}
timeRange_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(timeRange_);
timeRange_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000010;
break;
}
case 64: {
bitField0_ |= 0x00000020;
associatedCellCount_ = input.readInt32();
break;
}
case 72: {
bitField0_ |= 0x00000040;
nonce_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
columnValue_ = java.util.Collections.unmodifiableList(columnValue_);
}
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
attribute_ = java.util.Collections.unmodifiableList(attribute_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder.class);
}
public static com.google.protobuf.Parser<MutationProto> PARSER =
new com.google.protobuf.AbstractParser<MutationProto>() {
public MutationProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MutationProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<MutationProto> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hbase.pb.MutationProto.Durability}
*/
public enum Durability
implements com.google.protobuf.ProtocolMessageEnum {
/**
* USE_DEFAULT = 0;
*/
USE_DEFAULT(0, 0),
/**
* SKIP_WAL = 1;
*/
SKIP_WAL(1, 1),
/**
* ASYNC_WAL = 2;
*/
ASYNC_WAL(2, 2),
/**
* SYNC_WAL = 3;
*/
SYNC_WAL(3, 3),
/**
* FSYNC_WAL = 4;
*/
FSYNC_WAL(4, 4),
;
/**
* USE_DEFAULT = 0;
*/
public static final int USE_DEFAULT_VALUE = 0;
/**
* SKIP_WAL = 1;
*/
public static final int SKIP_WAL_VALUE = 1;
/**
* ASYNC_WAL = 2;
*/
public static final int ASYNC_WAL_VALUE = 2;
/**
* SYNC_WAL = 3;
*/
public static final int SYNC_WAL_VALUE = 3;
/**
* FSYNC_WAL = 4;
*/
public static final int FSYNC_WAL_VALUE = 4;
public final int getNumber() { return value; }
public static Durability valueOf(int value) {
switch (value) {
case 0: return USE_DEFAULT;
case 1: return SKIP_WAL;
case 2: return ASYNC_WAL;
case 3: return SYNC_WAL;
case 4: return FSYNC_WAL;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<Durability>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<Durability>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<Durability>() {
public Durability findValueByNumber(int number) {
return Durability.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.getDescriptor().getEnumTypes().get(0);
}
private static final Durability[] VALUES = values();
public static Durability valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private Durability(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.MutationProto.Durability)
}
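// Illustrative sketch (not part of the generated source): raw wire values map
// to Durability constants through valueOf(int), which returns null for
// unrecognized values, e.g.:
//
//   MutationProto.Durability d = MutationProto.Durability.valueOf(3); // SYNC_WAL
//   int wire = MutationProto.Durability.SKIP_WAL.getNumber();         // 1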
/**
* Protobuf enum {@code hbase.pb.MutationProto.MutationType}
*/
public enum MutationType
implements com.google.protobuf.ProtocolMessageEnum {
/**
* APPEND = 0;
*/
APPEND(0, 0),
/**
* INCREMENT = 1;
*/
INCREMENT(1, 1),
/**
* PUT = 2;
*/
PUT(2, 2),
/**
* DELETE = 3;
*/
DELETE(3, 3),
;
/**
* APPEND = 0;
*/
public static final int APPEND_VALUE = 0;
/**
* INCREMENT = 1;
*/
public static final int INCREMENT_VALUE = 1;
/**
* PUT = 2;
*/
public static final int PUT_VALUE = 2;
/**
* DELETE = 3;
*/
public static final int DELETE_VALUE = 3;
public final int getNumber() { return value; }
public static MutationType valueOf(int value) {
switch (value) {
case 0: return APPEND;
case 1: return INCREMENT;
case 2: return PUT;
case 3: return DELETE;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<MutationType>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<MutationType>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<MutationType>() {
public MutationType findValueByNumber(int number) {
return MutationType.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.getDescriptor().getEnumTypes().get(1);
}
private static final MutationType[] VALUES = values();
public static MutationType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private MutationType(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.MutationProto.MutationType)
}
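// Illustrative sketch (not part of the generated source): consumers typically
// dispatch on MutationType when applying a MutationProto, roughly:
//
//   switch (proto.getMutateType()) {
//     case PUT:    /* apply as a Put */    break;
//     case DELETE: /* apply as a Delete */ break;
//     case APPEND:
//     case INCREMENT: /* read-modify-write paths */ break;
//   }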
/**
* Protobuf enum {@code hbase.pb.MutationProto.DeleteType}
*/
public enum DeleteType
implements com.google.protobuf.ProtocolMessageEnum {
/**
* DELETE_ONE_VERSION = 0;
*/
DELETE_ONE_VERSION(0, 0),
/**
* DELETE_MULTIPLE_VERSIONS = 1;
*/
DELETE_MULTIPLE_VERSIONS(1, 1),
/**
* DELETE_FAMILY = 2;
*/
DELETE_FAMILY(2, 2),
/**
* DELETE_FAMILY_VERSION = 3;
*/
DELETE_FAMILY_VERSION(3, 3),
;
/**
* DELETE_ONE_VERSION = 0;
*/
public static final int DELETE_ONE_VERSION_VALUE = 0;
/**
* DELETE_MULTIPLE_VERSIONS = 1;
*/
public static final int DELETE_MULTIPLE_VERSIONS_VALUE = 1;
/**
* DELETE_FAMILY = 2;
*/
public static final int DELETE_FAMILY_VALUE = 2;
/**
* DELETE_FAMILY_VERSION = 3;
*/
public static final int DELETE_FAMILY_VERSION_VALUE = 3;
public final int getNumber() { return value; }
public static DeleteType valueOf(int value) {
switch (value) {
case 0: return DELETE_ONE_VERSION;
case 1: return DELETE_MULTIPLE_VERSIONS;
case 2: return DELETE_FAMILY;
case 3: return DELETE_FAMILY_VERSION;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<DeleteType>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<DeleteType>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<DeleteType>() {
public DeleteType findValueByNumber(int number) {
return DeleteType.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.getDescriptor().getEnumTypes().get(2);
}
private static final DeleteType[] VALUES = values();
public static DeleteType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private DeleteType(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.MutationProto.DeleteType)
}
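// Illustrative sketch (not part of the generated source): DeleteType travels
// in QualifierValue.delete_type (declared below) and is only meaningful when
// the enclosing MutationProto has mutate_type == DELETE, e.g.:
//
//   MutationProto.ColumnValue.QualifierValue qv =
//       MutationProto.ColumnValue.QualifierValue.newBuilder()
//           .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("q"))
//           .setDeleteType(MutationProto.DeleteType.DELETE_ONE_VERSION)
//           .build();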
public interface ColumnValueOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes family = 1;
/**
* required bytes family = 1;
*/
boolean hasFamily();
/**
* required bytes family = 1;
*/
com.google.protobuf.ByteString getFamily();
// repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue>
getQualifierValueList();
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue getQualifierValue(int index);
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
int getQualifierValueCount();
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder>
getQualifierValueOrBuilderList();
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder getQualifierValueOrBuilder(
int index);
}
/**
* Protobuf type {@code hbase.pb.MutationProto.ColumnValue}
*/
@javax.annotation.Generated("proto") public static final class ColumnValue extends
com.google.protobuf.GeneratedMessage
implements ColumnValueOrBuilder {
// Use ColumnValue.newBuilder() to construct.
private ColumnValue(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ColumnValue(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ColumnValue defaultInstance;
public static ColumnValue getDefaultInstance() {
return defaultInstance;
}
public ColumnValue getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ColumnValue(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
family_ = input.readBytes();
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
qualifierValue_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue>();
mutable_bitField0_ |= 0x00000002;
}
qualifierValue_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
qualifierValue_ = java.util.Collections.unmodifiableList(qualifierValue_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.Builder.class);
}
public static com.google.protobuf.Parser<ColumnValue> PARSER =
new com.google.protobuf.AbstractParser<ColumnValue>() {
public ColumnValue parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ColumnValue(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ColumnValue> getParserForType() {
return PARSER;
}
public interface QualifierValueOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional bytes qualifier = 1;
/**
* optional bytes qualifier = 1;
*/
boolean hasQualifier();
/**
* optional bytes qualifier = 1;
*/
com.google.protobuf.ByteString getQualifier();
// optional bytes value = 2;
/**
* optional bytes value = 2;
*/
boolean hasValue();
/**
* optional bytes value = 2;
*/
com.google.protobuf.ByteString getValue();
// optional uint64 timestamp = 3;
/**
* optional uint64 timestamp = 3;
*/
boolean hasTimestamp();
/**
* optional uint64 timestamp = 3;
*/
long getTimestamp();
// optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
boolean hasDeleteType();
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType getDeleteType();
// optional bytes tags = 5;
/**
* optional bytes tags = 5;
*/
boolean hasTags();
/**
* optional bytes tags = 5;
*/
com.google.protobuf.ByteString getTags();
}
/**
* Protobuf type {@code hbase.pb.MutationProto.ColumnValue.QualifierValue}
*/
@javax.annotation.Generated("proto") public static final class QualifierValue extends
com.google.protobuf.GeneratedMessage
implements QualifierValueOrBuilder {
// Use QualifierValue.newBuilder() to construct.
private QualifierValue(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private QualifierValue(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final QualifierValue defaultInstance;
public static QualifierValue getDefaultInstance() {
return defaultInstance;
}
public QualifierValue getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private QualifierValue(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
qualifier_ = input.readBytes();
break;
}
case 18: {
bitField0_ |= 0x00000002;
value_ = input.readBytes();
break;
}
case 24: {
bitField0_ |= 0x00000004;
timestamp_ = input.readUInt64();
break;
}
case 32: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(4, rawValue);
} else {
bitField0_ |= 0x00000008;
deleteType_ = value;
}
break;
}
case 42: {
bitField0_ |= 0x00000010;
tags_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_QualifierValue_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_QualifierValue_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder.class);
}
public static com.google.protobuf.Parser<QualifierValue> PARSER =
new com.google.protobuf.AbstractParser<QualifierValue>() {
public QualifierValue parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new QualifierValue(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<QualifierValue> getParserForType() {
return PARSER;
}
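// Editor's note: an illustrative helper, not generated output; the method name
// is made up. It shows that the PARSER field and the static parseFrom overloads
// further down are equivalent entry points for decoding a serialized
// QualifierValue.
private static QualifierValue exampleParse(byte[] wire)
throws com.google.protobuf.InvalidProtocolBufferException {
QualifierValue viaParser = PARSER.parseFrom(wire);
QualifierValue viaStatic = QualifierValue.parseFrom(wire);
assert viaParser.equals(viaStatic);
return viaParser;
}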
private int bitField0_;
// optional bytes qualifier = 1;
public static final int QUALIFIER_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString qualifier_;
/**
* optional bytes qualifier = 1;
*/
public boolean hasQualifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bytes qualifier = 1;
*/
public com.google.protobuf.ByteString getQualifier() {
return qualifier_;
}
// optional bytes value = 2;
public static final int VALUE_FIELD_NUMBER = 2;
private com.google.protobuf.ByteString value_;
/**
* optional bytes value = 2;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes value = 2;
*/
public com.google.protobuf.ByteString getValue() {
return value_;
}
// optional uint64 timestamp = 3;
public static final int TIMESTAMP_FIELD_NUMBER = 3;
private long timestamp_;
/**
* optional uint64 timestamp = 3;
*/
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 timestamp = 3;
*/
public long getTimestamp() {
return timestamp_;
}
// optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
public static final int DELETE_TYPE_FIELD_NUMBER = 4;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType deleteType_;
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
public boolean hasDeleteType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType getDeleteType() {
return deleteType_;
}
// optional bytes tags = 5;
public static final int TAGS_FIELD_NUMBER = 5;
private com.google.protobuf.ByteString tags_;
/**
* optional bytes tags = 5;
*/
public boolean hasTags() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes tags = 5;
*/
public com.google.protobuf.ByteString getTags() {
return tags_;
}
private void initFields() {
qualifier_ = com.google.protobuf.ByteString.EMPTY;
value_ = com.google.protobuf.ByteString.EMPTY;
timestamp_ = 0L;
deleteType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType.DELETE_ONE_VERSION;
tags_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, qualifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(2, value_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, timestamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(4, deleteType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBytes(5, tags_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, qualifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, value_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, timestamp_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(4, deleteType_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(5, tags_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) obj;
boolean result = true;
result = result && (hasQualifier() == other.hasQualifier());
if (hasQualifier()) {
result = result && getQualifier()
.equals(other.getQualifier());
}
result = result && (hasValue() == other.hasValue());
if (hasValue()) {
result = result && getValue()
.equals(other.getValue());
}
result = result && (hasTimestamp() == other.hasTimestamp());
if (hasTimestamp()) {
result = result && (getTimestamp()
== other.getTimestamp());
}
result = result && (hasDeleteType() == other.hasDeleteType());
if (hasDeleteType()) {
result = result &&
(getDeleteType() == other.getDeleteType());
}
result = result && (hasTags() == other.hasTags());
if (hasTags()) {
result = result && getTags()
.equals(other.getTags());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasQualifier()) {
hash = (37 * hash) + QUALIFIER_FIELD_NUMBER;
hash = (53 * hash) + getQualifier().hashCode();
}
if (hasValue()) {
hash = (37 * hash) + VALUE_FIELD_NUMBER;
hash = (53 * hash) + getValue().hashCode();
}
if (hasTimestamp()) {
hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTimestamp());
}
if (hasDeleteType()) {
hash = (37 * hash) + DELETE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getDeleteType());
}
if (hasTags()) {
hash = (37 * hash) + TAGS_FIELD_NUMBER;
hash = (53 * hash) + getTags().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.MutationProto.ColumnValue.QualifierValue}
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_QualifierValue_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_QualifierValue_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
qualifier_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
value_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000002);
timestamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
deleteType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType.DELETE_ONE_VERSION;
bitField0_ = (bitField0_ & ~0x00000008);
tags_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_QualifierValue_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue build() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.qualifier_ = qualifier_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.value_ = value_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.timestamp_ = timestamp_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.deleteType_ = deleteType_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.tags_ = tags_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.getDefaultInstance()) return this;
if (other.hasQualifier()) {
setQualifier(other.getQualifier());
}
if (other.hasValue()) {
setValue(other.getValue());
}
if (other.hasTimestamp()) {
setTimestamp(other.getTimestamp());
}
if (other.hasDeleteType()) {
setDeleteType(other.getDeleteType());
}
if (other.hasTags()) {
setTags(other.getTags());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional bytes qualifier = 1;
private com.google.protobuf.ByteString qualifier_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes qualifier = 1;
*/
public boolean hasQualifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bytes qualifier = 1;
*/
public com.google.protobuf.ByteString getQualifier() {
return qualifier_;
}
/**
* optional bytes qualifier = 1;
*/
public Builder setQualifier(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
qualifier_ = value;
onChanged();
return this;
}
/**
* optional bytes qualifier = 1;
*/
public Builder clearQualifier() {
bitField0_ = (bitField0_ & ~0x00000001);
qualifier_ = getDefaultInstance().getQualifier();
onChanged();
return this;
}
// optional bytes value = 2;
private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes value = 2;
*/
public boolean hasValue() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes value = 2;
*/
public com.google.protobuf.ByteString getValue() {
return value_;
}
/**
* optional bytes value = 2;
*/
public Builder setValue(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
value_ = value;
onChanged();
return this;
}
/**
* optional bytes value = 2;
*/
public Builder clearValue() {
bitField0_ = (bitField0_ & ~0x00000002);
value_ = getDefaultInstance().getValue();
onChanged();
return this;
}
// optional uint64 timestamp = 3;
private long timestamp_ ;
/**
* optional uint64 timestamp = 3;
*/
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 timestamp = 3;
*/
public long getTimestamp() {
return timestamp_;
}
/**
* optional uint64 timestamp = 3;
*/
public Builder setTimestamp(long value) {
bitField0_ |= 0x00000004;
timestamp_ = value;
onChanged();
return this;
}
/**
* optional uint64 timestamp = 3;
*/
public Builder clearTimestamp() {
bitField0_ = (bitField0_ & ~0x00000004);
timestamp_ = 0L;
onChanged();
return this;
}
// optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType deleteType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType.DELETE_ONE_VERSION;
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
public boolean hasDeleteType() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType getDeleteType() {
return deleteType_;
}
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
public Builder setDeleteType(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000008;
deleteType_ = value;
onChanged();
return this;
}
/**
* optional .hbase.pb.MutationProto.DeleteType delete_type = 4;
*/
public Builder clearDeleteType() {
bitField0_ = (bitField0_ & ~0x00000008);
deleteType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType.DELETE_ONE_VERSION;
onChanged();
return this;
}
// optional bytes tags = 5;
private com.google.protobuf.ByteString tags_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes tags = 5;
*/
public boolean hasTags() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bytes tags = 5;
*/
public com.google.protobuf.ByteString getTags() {
return tags_;
}
/**
* optional bytes tags = 5;
*/
public Builder setTags(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000010;
tags_ = value;
onChanged();
return this;
}
/**
* optional bytes tags = 5;
*/
public Builder clearTags() {
bitField0_ = (bitField0_ & ~0x00000010);
tags_ = getDefaultInstance().getTags();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.MutationProto.ColumnValue.QualifierValue)
}
static {
defaultInstance = new QualifierValue(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.MutationProto.ColumnValue.QualifierValue)
}
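// Editor's note: a minimal usage sketch, not generated output; the qualifier,
// value and timestamp bytes are illustrative. Every QualifierValue field is
// optional, so build() always succeeds, and a serialize/parse round trip
// yields an equal message with the same hash code.
private static QualifierValue exampleQualifierValueRoundTrip()
throws com.google.protobuf.InvalidProtocolBufferException {
QualifierValue qv = QualifierValue.newBuilder()
.setQualifier(com.google.protobuf.ByteString.copyFromUtf8("q"))
.setValue(com.google.protobuf.ByteString.copyFromUtf8("v"))
.setTimestamp(1L)
.build();
QualifierValue parsed = QualifierValue.parseFrom(qv.toByteArray());
assert parsed.equals(qv) && parsed.hashCode() == qv.hashCode();
return parsed;
}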
private int bitField0_;
// required bytes family = 1;
public static final int FAMILY_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString family_;
/**
* required bytes family = 1;
*/
public boolean hasFamily() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes family = 1;
*/
public com.google.protobuf.ByteString getFamily() {
return family_;
}
// repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
public static final int QUALIFIER_VALUE_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue> qualifierValue_;
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue> getQualifierValueList() {
return qualifierValue_;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder>
getQualifierValueOrBuilderList() {
return qualifierValue_;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public int getQualifierValueCount() {
return qualifierValue_.size();
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue getQualifierValue(int index) {
return qualifierValue_.get(index);
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder getQualifierValueOrBuilder(
int index) {
return qualifierValue_.get(index);
}
private void initFields() {
family_ = com.google.protobuf.ByteString.EMPTY;
qualifierValue_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFamily()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, family_);
}
for (int i = 0; i < qualifierValue_.size(); i++) {
output.writeMessage(2, qualifierValue_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, family_);
}
for (int i = 0; i < qualifierValue_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, qualifierValue_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) obj;
boolean result = true;
result = result && (hasFamily() == other.hasFamily());
if (hasFamily()) {
result = result && getFamily()
.equals(other.getFamily());
}
result = result && getQualifierValueList()
.equals(other.getQualifierValueList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFamily()) {
hash = (37 * hash) + FAMILY_FIELD_NUMBER;
hash = (53 * hash) + getFamily().hashCode();
}
if (getQualifierValueCount() > 0) {
hash = (37 * hash) + QUALIFIER_VALUE_FIELD_NUMBER;
hash = (53 * hash) + getQualifierValueList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.MutationProto.ColumnValue}
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValueOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getQualifierValueFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
family_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
if (qualifierValueBuilder_ == null) {
qualifierValue_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
qualifierValueBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutationProto_ColumnValue_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue build() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.family_ = family_;
if (qualifierValueBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
qualifierValue_ = java.util.Collections.unmodifiableList(qualifierValue_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.qualifierValue_ = qualifierValue_;
} else {
result.qualifierValue_ = qualifierValueBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.getDefaultInstance()) return this;
if (other.hasFamily()) {
setFamily(other.getFamily());
}
if (qualifierValueBuilder_ == null) {
if (!other.qualifierValue_.isEmpty()) {
if (qualifierValue_.isEmpty()) {
qualifierValue_ = other.qualifierValue_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureQualifierValueIsMutable();
qualifierValue_.addAll(other.qualifierValue_);
}
onChanged();
}
} else {
if (!other.qualifierValue_.isEmpty()) {
if (qualifierValueBuilder_.isEmpty()) {
qualifierValueBuilder_.dispose();
qualifierValueBuilder_ = null;
qualifierValue_ = other.qualifierValue_;
bitField0_ = (bitField0_ & ~0x00000002);
qualifierValueBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getQualifierValueFieldBuilder() : null;
} else {
qualifierValueBuilder_.addAllMessages(other.qualifierValue_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasFamily()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bytes family = 1;
private com.google.protobuf.ByteString family_ = com.google.protobuf.ByteString.EMPTY;
/**
* required bytes family = 1;
*/
public boolean hasFamily() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required bytes family = 1;
*/
public com.google.protobuf.ByteString getFamily() {
return family_;
}
/**
* required bytes family = 1;
*/
public Builder setFamily(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
family_ = value;
onChanged();
return this;
}
/**
* required bytes family = 1;
*/
public Builder clearFamily() {
bitField0_ = (bitField0_ & ~0x00000001);
family_ = getDefaultInstance().getFamily();
onChanged();
return this;
}
// repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue> qualifierValue_ =
java.util.Collections.emptyList();
private void ensureQualifierValueIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
qualifierValue_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue>(qualifierValue_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder> qualifierValueBuilder_;
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue> getQualifierValueList() {
if (qualifierValueBuilder_ == null) {
return java.util.Collections.unmodifiableList(qualifierValue_);
} else {
return qualifierValueBuilder_.getMessageList();
}
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public int getQualifierValueCount() {
if (qualifierValueBuilder_ == null) {
return qualifierValue_.size();
} else {
return qualifierValueBuilder_.getCount();
}
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue getQualifierValue(int index) {
if (qualifierValueBuilder_ == null) {
return qualifierValue_.get(index);
} else {
return qualifierValueBuilder_.getMessage(index);
}
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder setQualifierValue(
int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue value) {
if (qualifierValueBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureQualifierValueIsMutable();
qualifierValue_.set(index, value);
onChanged();
} else {
qualifierValueBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder setQualifierValue(
int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder builderForValue) {
if (qualifierValueBuilder_ == null) {
ensureQualifierValueIsMutable();
qualifierValue_.set(index, builderForValue.build());
onChanged();
} else {
qualifierValueBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder addQualifierValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue value) {
if (qualifierValueBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureQualifierValueIsMutable();
qualifierValue_.add(value);
onChanged();
} else {
qualifierValueBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder addQualifierValue(
int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue value) {
if (qualifierValueBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureQualifierValueIsMutable();
qualifierValue_.add(index, value);
onChanged();
} else {
qualifierValueBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder addQualifierValue(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder builderForValue) {
if (qualifierValueBuilder_ == null) {
ensureQualifierValueIsMutable();
qualifierValue_.add(builderForValue.build());
onChanged();
} else {
qualifierValueBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder addQualifierValue(
int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder builderForValue) {
if (qualifierValueBuilder_ == null) {
ensureQualifierValueIsMutable();
qualifierValue_.add(index, builderForValue.build());
onChanged();
} else {
qualifierValueBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder addAllQualifierValue(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue> values) {
if (qualifierValueBuilder_ == null) {
ensureQualifierValueIsMutable();
super.addAll(values, qualifierValue_);
onChanged();
} else {
qualifierValueBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder clearQualifierValue() {
if (qualifierValueBuilder_ == null) {
qualifierValue_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
qualifierValueBuilder_.clear();
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public Builder removeQualifierValue(int index) {
if (qualifierValueBuilder_ == null) {
ensureQualifierValueIsMutable();
qualifierValue_.remove(index);
onChanged();
} else {
qualifierValueBuilder_.remove(index);
}
return this;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder getQualifierValueBuilder(
int index) {
return getQualifierValueFieldBuilder().getBuilder(index);
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder getQualifierValueOrBuilder(
int index) {
if (qualifierValueBuilder_ == null) {
return qualifierValue_.get(index);
} else {
return qualifierValueBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder>
getQualifierValueOrBuilderList() {
if (qualifierValueBuilder_ != null) {
return qualifierValueBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(qualifierValue_);
}
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder addQualifierValueBuilder() {
return getQualifierValueFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.getDefaultInstance());
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder addQualifierValueBuilder(
int index) {
return getQualifierValueFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.getDefaultInstance());
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue.QualifierValue qualifier_value = 2;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder>
getQualifierValueBuilderList() {
return getQualifierValueFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder>
getQualifierValueFieldBuilder() {
if (qualifierValueBuilder_ == null) {
qualifierValueBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValueOrBuilder>(
qualifierValue_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
qualifierValue_ = null;
}
return qualifierValueBuilder_;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.MutationProto.ColumnValue)
}
static {
defaultInstance = new ColumnValue(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.MutationProto.ColumnValue)
}
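// Editor's note: an illustrative sketch, not generated output; the family and
// qualifier bytes are made up. ColumnValue declares family as required, so
// isInitialized() stays false and build() throws until setFamily(...) has been
// called; repeated qualifier_value entries are appended via the add methods.
private static ColumnValue exampleColumnValue() {
return ColumnValue.newBuilder()
.setFamily(com.google.protobuf.ByteString.copyFromUtf8("cf"))
.addQualifierValue(ColumnValue.QualifierValue.newBuilder()
.setQualifier(com.google.protobuf.ByteString.copyFromUtf8("q"))
.setValue(com.google.protobuf.ByteString.copyFromUtf8("v")))
.build();
}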
private int bitField0_;
// optional bytes row = 1;
public static final int ROW_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString row_;
/**
* optional bytes row = 1;
*/
public boolean hasRow() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bytes row = 1;
*/
public com.google.protobuf.ByteString getRow() {
return row_;
}
// optional .hbase.pb.MutationProto.MutationType mutate_type = 2;
public static final int MUTATE_TYPE_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType mutateType_;
/**
* optional .hbase.pb.MutationProto.MutationType mutate_type = 2;
*/
public boolean hasMutateType() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hbase.pb.MutationProto.MutationType mutate_type = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType getMutateType() {
return mutateType_;
}
// repeated .hbase.pb.MutationProto.ColumnValue column_value = 3;
public static final int COLUMN_VALUE_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue> columnValue_;
/**
* repeated .hbase.pb.MutationProto.ColumnValue column_value = 3;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue> getColumnValueList() {
return columnValue_;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue column_value = 3;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValueOrBuilder>
getColumnValueOrBuilderList() {
return columnValue_;
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue column_value = 3;
*/
public int getColumnValueCount() {
return columnValue_.size();
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue column_value = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue getColumnValue(int index) {
return columnValue_.get(index);
}
/**
* repeated .hbase.pb.MutationProto.ColumnValue column_value = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValueOrBuilder getColumnValueOrBuilder(
int index) {
return columnValue_.get(index);
}
// optional uint64 timestamp = 4;
public static final int TIMESTAMP_FIELD_NUMBER = 4;
private long timestamp_;
/**
* optional uint64 timestamp = 4;
*/
public boolean hasTimestamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 timestamp = 4;
*/
public long getTimestamp() {
return timestamp_;
}
// repeated .hbase.pb.NameBytesPair attribute = 5;
public static final int ATTRIBUTE_FIELD_NUMBER = 5;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair> attribute_;
/**
* repeated .hbase.pb.NameBytesPair attribute = 5;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair> getAttributeList() {
return attribute_;
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 5;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder>
getAttributeOrBuilderList() {
return attribute_;
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 5;
*/
public int getAttributeCount() {
return attribute_.size();
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getAttribute(int index) {
return attribute_.get(index);
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder getAttributeOrBuilder(
int index) {
return attribute_.get(index);
}
// optional .hbase.pb.MutationProto.Durability durability = 6 [default = USE_DEFAULT];
public static final int DURABILITY_FIELD_NUMBER = 6;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability durability_;
/**
* optional .hbase.pb.MutationProto.Durability durability = 6 [default = USE_DEFAULT];
*/
public boolean hasDurability() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hbase.pb.MutationProto.Durability durability = 6 [default = USE_DEFAULT];
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability getDurability() {
return durability_;
}
// optional .hbase.pb.TimeRange time_range = 7;
public static final int TIME_RANGE_FIELD_NUMBER = 7;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_;
/**
* optional .hbase.pb.TimeRange time_range = 7;
*
* For some mutations, a result may be returned, in which case,
* time range can be specified for potential performance gain
*/
public boolean hasTimeRange() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional .hbase.pb.TimeRange time_range = 7;
*
* For some mutations, a result may be returned, in which case,
* time range can be specified for potential performance gain
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() {
return timeRange_;
}
/**
* optional .hbase.pb.TimeRange time_range = 7;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() {
return timeRange_;
}
// optional int32 associated_cell_count = 8;
public static final int ASSOCIATED_CELL_COUNT_FIELD_NUMBER = 8;
private int associatedCellCount_;
/**
* optional int32 associated_cell_count = 8;
*
* The below count is set when the associated cells are NOT
* part of this protobuf message; they are passed alongside
* and then this Message is a placeholder with metadata. The
* count is needed to know how many to peel off the block of Cells as
* ours. NOTE: This is different from the pb managed cell_count of the
* 'cell' field above which is non-null when the cells are pb'd.
*/
public boolean hasAssociatedCellCount() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional int32 associated_cell_count = 8;
*/
public int getAssociatedCellCount() {
return associatedCellCount_;
}
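// Editor's note (illustrative, not generated output): when a mutation's cells
// travel in the RPC cell block instead of inside this message, the sender sets
// only this count, e.g. MutationProto.newBuilder().setAssociatedCellCount(3),
// and ships the three Cells alongside; the receiver then peels that many cells
// off the block for this mutation. setAssociatedCellCount is assumed here to be
// the accessor the generator derives for this field on the Builder.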
// optional uint64 nonce = 9;
public static final int NONCE_FIELD_NUMBER = 9;
private long nonce_;
/**
* optional uint64 nonce = 9;
*/
public boolean hasNonce() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint64 nonce = 9;
*/
public long getNonce() {
return nonce_;
}
private void initFields() {
row_ = com.google.protobuf.ByteString.EMPTY;
mutateType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.APPEND;
columnValue_ = java.util.Collections.emptyList();
timestamp_ = 0L;
attribute_ = java.util.Collections.emptyList();
durability_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Durability.USE_DEFAULT;
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
associatedCellCount_ = 0;
nonce_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getColumnValueCount(); i++) {
if (!getColumnValue(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getAttributeCount(); i++) {
if (!getAttribute(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
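// Editor's note: an illustrative sketch, not generated output, assuming the
// MutationProto Builder mirrors the repeated-field accessors shown earlier for
// qualifier_value. Because the loops above recurse into the repeated message
// fields, a MutationProto embedding a ColumnValue that lacks its required
// family is itself uninitialized:
//
//   MutationProto.Builder b = MutationProto.newBuilder()
//       .addColumnValue(ColumnValue.newBuilder());   // no family yet
//   b.isInitialized();                               // false
//   b.getColumnValueBuilder(0)
//       .setFamily(com.google.protobuf.ByteString.copyFromUtf8("cf"));
//   b.isInitialized();                               // now true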
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, row_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, mutateType_.getNumber());
}
for (int i = 0; i < columnValue_.size(); i++) {
output.writeMessage(3, columnValue_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(4, timestamp_);
}
for (int i = 0; i < attribute_.size(); i++) {
output.writeMessage(5, attribute_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeEnum(6, durability_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeMessage(7, timeRange_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeInt32(8, associatedCellCount_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt64(9, nonce_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, row_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, mutateType_.getNumber());
}
for (int i = 0; i < columnValue_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, columnValue_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, timestamp_);
}
for (int i = 0; i < attribute_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, attribute_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(6, durability_.getNumber());
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, timeRange_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(8, associatedCellCount_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(9, nonce_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) obj;
boolean result = true;
result = result && (hasRow() == other.hasRow());
if (hasRow()) {
result = result && getRow()
.equals(other.getRow());
}
result = result && (hasMutateType() == other.hasMutateType());
if (hasMutateType()) {
result = result &&
(getMutateType() == other.getMutateType());
}
result = result && getColumnValueList()
.equals(other.getColumnValueList());
result = result && (hasTimestamp() == other.hasTimestamp());
if (hasTimestamp()) {
result = result && (getTimestamp()
== other.getTimestamp());
}
result = result && getAttributeList()
.equals(other.getAttributeList());
result = result && (hasDurability() == other.hasDurability());
if (hasDurability()) {
result = result &&
(getDurability() == other.getDurability());
}
result = result && (hasTimeRange() == other.hasTimeRange());
if (hasTimeRange()) {
result = result && getTimeRange()
.equals(other.getTimeRange());
}
result = result && (hasAssociatedCellCount() == other.hasAssociatedCellCount());
if (hasAssociatedCellCount()) {
result = result && (getAssociatedCellCount()
== other.getAssociatedCellCount());
}
result = result && (hasNonce() == other.hasNonce());
if (hasNonce()) {
result = result && (getNonce()
== other.getNonce());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRow()) {
hash = (37 * hash) + ROW_FIELD_NUMBER;
hash = (53 * hash) + getRow().hashCode();
}
if (hasMutateType()) {
hash = (37 * hash) + MUTATE_TYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getMutateType());
}
if (getColumnValueCount() > 0) {
hash = (37 * hash) + COLUMN_VALUE_FIELD_NUMBER;
hash = (53 * hash) + getColumnValueList().hashCode();
}
if (hasTimestamp()) {
hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTimestamp());
}
if (getAttributeCount() > 0) {
hash = (37 * hash) + ATTRIBUTE_FIELD_NUMBER;
hash = (53 * hash) + getAttributeList().hashCode();
}
if (hasDurability()) {
hash = (37 * hash) + DURABILITY_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getDurability());
}
if (hasTimeRange()) {
hash = (37 * hash) + TIME_RANGE_FIELD_NUMBER;
hash = (53 * hash) + getTimeRange().hashCode();
}
if (hasAssociatedCellCount()) {
hash = (37 * hash) + ASSOCIATED_CELL_COUNT_FIELD_NUMBER;
hash = (53 * hash) + getAssociatedCellCount();
}
if (hasNonce()) {
hash = (37 * hash) + NONCE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNonce());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
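// Round-trip sketch: the parseFrom overloads above invert writeTo()/
// getSerializedSize(). toByteArray() is inherited from the protobuf
// runtime's AbstractMessageLite; the helper name is ours.
private static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto
    exampleRoundTrip(
        org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto m)
    throws com.google.protobuf.InvalidProtocolBufferException {
  byte[] wire = m.toByteArray(); // wire.length == m.getSerializedSize()
  return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.parseFrom(wire);
}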
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hbase.pb.MutateRequest}
 *
 *
 **
 * The mutate request. Perform a single Mutate operation.
 *
 * Optionally, you can specify a condition. The mutate
 * will take place only if the condition is met. Otherwise,
 * the mutate will be ignored. In the response result,
 * parameter processed is used to indicate if the mutate
 * actually happened.
 *
 */
@javax.annotation.Generated("proto") public static final class MutateRequest extends
com.google.protobuf.GeneratedMessage
implements MutateRequestOrBuilder {
// Use MutateRequest.newBuilder() to construct.
private MutateRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private MutateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final MutateRequest defaultInstance;
public static MutateRequest getDefaultInstance() {
return defaultInstance;
}
public MutateRequest getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MutateRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = region_.toBuilder();
}
region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(region_);
region_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = mutation_.toBuilder();
}
mutation_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(mutation_);
mutation_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 26: {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = condition_.toBuilder();
}
condition_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(condition_);
condition_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 32: {
bitField0_ |= 0x00000008;
nonceGroup_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
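// Why the switch above matches 10, 18, 26 and 32: a protobuf tag is
// (field_number << 3) | wire_type, so field 1 ('region', wire type 2 =
// length-delimited) arrives as (1 << 3) | 2 == 10, and field 4
// ('nonce_group', wire type 0 = varint) as (4 << 3) | 0 == 32.
// Minimal sketch of that arithmetic; not part of the generated API.
private static int exampleTag(int fieldNumber, int wireType) {
  return (fieldNumber << 3) | wireType;
}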
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutateRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_MutateRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.Builder.class);
}
public static com.google.protobuf.Parser<MutateRequest> PARSER =
new com.google.protobuf.AbstractParser<MutateRequest>() {
public MutateRequest parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MutateRequest(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<MutateRequest> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hbase.pb.RegionSpecifier region = 1;
public static final int REGION_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_;
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public boolean hasRegion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
return region_;
}
/**
* required .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
return region_;
}
// required .hbase.pb.MutationProto mutation = 2;
public static final int MUTATION_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto mutation_;
/**
* required .hbase.pb.MutationProto mutation = 2;
*/
public boolean hasMutation() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hbase.pb.MutationProto mutation = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto getMutation() {
return mutation_;
}
/**
* required .hbase.pb.MutationProto mutation = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProtoOrBuilder getMutationOrBuilder() {
return mutation_;
}
// optional .hbase.pb.Condition condition = 3;
public static final int CONDITION_FIELD_NUMBER = 3;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition condition_;
/**
* optional .hbase.pb.Condition condition = 3;
*/
public boolean hasCondition() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hbase.pb.Condition condition = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition getCondition() {
return condition_;
}
/**
* optional .hbase.pb.Condition condition = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ConditionOrBuilder getConditionOrBuilder() {
return condition_;
}
// optional uint64 nonce_group = 4;
public static final int NONCE_GROUP_FIELD_NUMBER = 4;
private long nonceGroup_;
/**
* optional uint64 nonce_group = 4;
*/
public boolean hasNonceGroup() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint64 nonce_group = 4;
*/
public long getNonceGroup() {
return nonceGroup_;
}
private void initFields() {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
mutation_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.getDefaultInstance();
condition_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition.getDefaultInstance();
nonceGroup_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegion()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasMutation()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegion().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getMutation().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (hasCondition()) {
if (!getCondition().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
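// Minimal usage sketch, assuming the standard generated Builder setters
// (setRegion, setMutation), which are not shown in this excerpt:
// isInitialized() above only passes once both required fields are set
// and are themselves initialized.
private static boolean exampleRequiredFieldsSatisfied(
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region,
    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto mutation) {
  return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest.newBuilder()
      .setRegion(region)     // required field 1
      .setMutation(mutation) // required field 2
      .buildPartial()        // buildPartial() skips the required-field check
      .isInitialized();      // true iff region and mutation are initialized
}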
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, region_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, mutation_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(3, condition_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, nonceGroup_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, region_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, mutation_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, condition_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, nonceGroup_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest) obj;
boolean result = true;
result = result && (hasRegion() == other.hasRegion());
if (hasRegion()) {
result = result && getRegion()
.equals(other.getRegion());
}
result = result && (hasMutation() == other.hasMutation());
if (hasMutation()) {
result = result && getMutation()
.equals(other.getMutation());
}
result = result && (hasCondition() == other.hasCondition());
if (hasCondition()) {
result = result && getCondition()
.equals(other.getCondition());
}
result = result && (hasNonceGroup() == other.hasNonceGroup());
if (hasNonceGroup()) {
result = result && (getNonceGroup()
== other.getNonceGroup());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegion()) {
hash = (37 * hash) + REGION_FIELD_NUMBER;
hash = (53 * hash) + getRegion().hashCode();
}
if (hasMutation()) {
hash = (37 * hash) + MUTATION_FIELD_NUMBER;
hash = (53 * hash) + getMutation().hashCode();
}
if (hasCondition()) {
hash = (37 * hash) + CONDITION_FIELD_NUMBER;
hash = (53 * hash) + getCondition().hashCode();
}
if (hasNonceGroup()) {
hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNonceGroup());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hbase.pb.Scan}
 *
 *
 **
 * Instead of a get from a table, you can scan it with optional filters.
 * You can specify the row key range, time range, the columns/families
 * to scan and so on.
 *
 * This scan is used the first time in a scan request. The response of
 * the initial scan will return a scanner id, which should be used to
 * fetch result batches later on before it is closed.
 *
 */
@javax.annotation.Generated("proto") public static final class Scan extends
com.google.protobuf.GeneratedMessage
implements ScanOrBuilder {
// Use Scan.newBuilder() to construct.
private Scan(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Scan(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Scan defaultInstance;
public static Scan getDefaultInstance() {
return defaultInstance;
}
public Scan getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Scan(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
column_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column>();
mutable_bitField0_ |= 0x00000001;
}
column_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column.PARSER, extensionRegistry));
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
attribute_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair>();
mutable_bitField0_ |= 0x00000002;
}
attribute_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.PARSER, extensionRegistry));
break;
}
case 26: {
bitField0_ |= 0x00000001;
startRow_ = input.readBytes();
break;
}
case 34: {
bitField0_ |= 0x00000002;
stopRow_ = input.readBytes();
break;
}
case 42: {
org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = filter_.toBuilder();
}
filter_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(filter_);
filter_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 50: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = timeRange_.toBuilder();
}
timeRange_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(timeRange_);
timeRange_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
case 56: {
bitField0_ |= 0x00000010;
maxVersions_ = input.readUInt32();
break;
}
case 64: {
bitField0_ |= 0x00000020;
cacheBlocks_ = input.readBool();
break;
}
case 72: {
bitField0_ |= 0x00000040;
batchSize_ = input.readUInt32();
break;
}
case 80: {
bitField0_ |= 0x00000080;
maxResultSize_ = input.readUInt64();
break;
}
case 88: {
bitField0_ |= 0x00000100;
storeLimit_ = input.readUInt32();
break;
}
case 96: {
bitField0_ |= 0x00000200;
storeOffset_ = input.readUInt32();
break;
}
case 104: {
bitField0_ |= 0x00000400;
loadColumnFamiliesOnDemand_ = input.readBool();
break;
}
case 112: {
bitField0_ |= 0x00000800;
small_ = input.readBool();
break;
}
case 120: {
bitField0_ |= 0x00001000;
reversed_ = input.readBool();
break;
}
case 128: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(16, rawValue);
} else {
bitField0_ |= 0x00002000;
consistency_ = value;
}
break;
}
case 136: {
bitField0_ |= 0x00004000;
caching_ = input.readUInt32();
break;
}
case 144: {
bitField0_ |= 0x00008000;
allowPartialResults_ = input.readBool();
break;
}
case 154: {
if (!((mutable_bitField0_ & 0x00040000) == 0x00040000)) {
cfTimeRange_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange>();
mutable_bitField0_ |= 0x00040000;
}
cfTimeRange_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.PARSER, extensionRegistry));
break;
}
case 160: {
bitField0_ |= 0x00010000;
mvccReadPoint_ = input.readUInt64();
break;
}
case 168: {
bitField0_ |= 0x00020000;
includeStartRow_ = input.readBool();
break;
}
case 176: {
bitField0_ |= 0x00040000;
includeStopRow_ = input.readBool();
break;
}
case 184: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType value = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(23, rawValue);
} else {
bitField0_ |= 0x00080000;
readType_ = value;
}
break;
}
case 192: {
bitField0_ |= 0x00100000;
needCursorResult_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
column_ = java.util.Collections.unmodifiableList(column_);
}
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
attribute_ = java.util.Collections.unmodifiableList(attribute_);
}
if (((mutable_bitField0_ & 0x00040000) == 0x00040000)) {
cfTimeRange_ = java.util.Collections.unmodifiableList(cfTimeRange_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
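// Sketch: tags the default branch above does not recognize are kept in
// unknownFields (built in the finally block), so a client that reparses
// and reserializes a Scan written by a newer peer preserves every field.
private static byte[] exampleReserializeKeepsUnknowns(byte[] wire)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan
      .parseFrom(wire).toByteArray();
}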
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Scan_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Scan_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder.class);
}
public static com.google.protobuf.Parser<Scan> PARSER =
new com.google.protobuf.AbstractParser<Scan>() {
public Scan parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Scan(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Scan> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hbase.pb.Scan.ReadType}
*/
public enum ReadType
implements com.google.protobuf.ProtocolMessageEnum {
/**
* DEFAULT = 0;
*/
DEFAULT(0, 0),
/**
* STREAM = 1;
*/
STREAM(1, 1),
/**
* PREAD = 2;
*/
PREAD(2, 2),
;
/**
* DEFAULT = 0;
*/
public static final int DEFAULT_VALUE = 0;
/**
* STREAM = 1;
*/
public static final int STREAM_VALUE = 1;
/**
* PREAD = 2;
*/
public static final int PREAD_VALUE = 2;
public final int getNumber() { return value; }
public static ReadType valueOf(int value) {
switch (value) {
case 0: return DEFAULT;
case 1: return STREAM;
case 2: return PREAD;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<ReadType>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<ReadType>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<ReadType>() {
public ReadType findValueByNumber(int number) {
return ReadType.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDescriptor().getEnumTypes().get(0);
}
private static final ReadType[] VALUES = values();
public static ReadType valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private ReadType(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.Scan.ReadType)
}
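// Sketch: ReadType.valueOf(int) above returns null for numbers this
// client does not know; that is why the parse loop stores such values
// in unknownFields (via mergeVarintField) instead of failing. A caller
// resolving a raw wire number might fall back like this:
private static ReadType exampleReadTypeOrDefault(int wireNumber) {
  ReadType t = ReadType.valueOf(wireNumber);
  return t == null ? ReadType.DEFAULT : t;
}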
private int bitField0_;
// repeated .hbase.pb.Column column = 1;
public static final int COLUMN_FIELD_NUMBER = 1;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column> column_;
/**
* repeated .hbase.pb.Column column = 1;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column> getColumnList() {
return column_;
}
/**
* repeated .hbase.pb.Column column = 1;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ColumnOrBuilder>
getColumnOrBuilderList() {
return column_;
}
/**
* repeated .hbase.pb.Column column = 1;
*/
public int getColumnCount() {
return column_.size();
}
/**
* repeated .hbase.pb.Column column = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column getColumn(int index) {
return column_.get(index);
}
/**
* repeated .hbase.pb.Column column = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ColumnOrBuilder getColumnOrBuilder(
int index) {
return column_.get(index);
}
// repeated .hbase.pb.NameBytesPair attribute = 2;
public static final int ATTRIBUTE_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair> attribute_;
/**
* repeated .hbase.pb.NameBytesPair attribute = 2;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair> getAttributeList() {
return attribute_;
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 2;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder>
getAttributeOrBuilderList() {
return attribute_;
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 2;
*/
public int getAttributeCount() {
return attribute_.size();
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getAttribute(int index) {
return attribute_.get(index);
}
/**
* repeated .hbase.pb.NameBytesPair attribute = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder getAttributeOrBuilder(
int index) {
return attribute_.get(index);
}
// optional bytes start_row = 3;
public static final int START_ROW_FIELD_NUMBER = 3;
private com.google.protobuf.ByteString startRow_;
/**
* optional bytes start_row = 3;
*/
public boolean hasStartRow() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bytes start_row = 3;
*/
public com.google.protobuf.ByteString getStartRow() {
return startRow_;
}
// optional bytes stop_row = 4;
public static final int STOP_ROW_FIELD_NUMBER = 4;
private com.google.protobuf.ByteString stopRow_;
/**
* optional bytes stop_row = 4;
*/
public boolean hasStopRow() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bytes stop_row = 4;
*/
public com.google.protobuf.ByteString getStopRow() {
return stopRow_;
}
// optional .hbase.pb.Filter filter = 5;
public static final int FILTER_FIELD_NUMBER = 5;
private org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter filter_;
/**
* optional .hbase.pb.Filter filter = 5;
*/
public boolean hasFilter() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hbase.pb.Filter filter = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter getFilter() {
return filter_;
}
/**
* optional .hbase.pb.Filter filter = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.FilterProtos.FilterOrBuilder getFilterOrBuilder() {
return filter_;
}
// optional .hbase.pb.TimeRange time_range = 6;
public static final int TIME_RANGE_FIELD_NUMBER = 6;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange timeRange_;
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
public boolean hasTimeRange() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getTimeRange() {
return timeRange_;
}
/**
* optional .hbase.pb.TimeRange time_range = 6;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder getTimeRangeOrBuilder() {
return timeRange_;
}
// optional uint32 max_versions = 7 [default = 1];
public static final int MAX_VERSIONS_FIELD_NUMBER = 7;
private int maxVersions_;
/**
* optional uint32 max_versions = 7 [default = 1];
*/
public boolean hasMaxVersions() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional uint32 max_versions = 7 [default = 1];
*/
public int getMaxVersions() {
return maxVersions_;
}
// optional bool cache_blocks = 8 [default = true];
public static final int CACHE_BLOCKS_FIELD_NUMBER = 8;
private boolean cacheBlocks_;
/**
* optional bool cache_blocks = 8 [default = true];
*/
public boolean hasCacheBlocks() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional bool cache_blocks = 8 [default = true];
*/
public boolean getCacheBlocks() {
return cacheBlocks_;
}
// optional uint32 batch_size = 9;
public static final int BATCH_SIZE_FIELD_NUMBER = 9;
private int batchSize_;
/**
* optional uint32 batch_size = 9;
*/
public boolean hasBatchSize() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional uint32 batch_size = 9;
*/
public int getBatchSize() {
return batchSize_;
}
// optional uint64 max_result_size = 10;
public static final int MAX_RESULT_SIZE_FIELD_NUMBER = 10;
private long maxResultSize_;
/**
* optional uint64 max_result_size = 10;
*/
public boolean hasMaxResultSize() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional uint64 max_result_size = 10;
*/
public long getMaxResultSize() {
return maxResultSize_;
}
// optional uint32 store_limit = 11;
public static final int STORE_LIMIT_FIELD_NUMBER = 11;
private int storeLimit_;
/**
* optional uint32 store_limit = 11;
*/
public boolean hasStoreLimit() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional uint32 store_limit = 11;
*/
public int getStoreLimit() {
return storeLimit_;
}
// optional uint32 store_offset = 12;
public static final int STORE_OFFSET_FIELD_NUMBER = 12;
private int storeOffset_;
/**
* optional uint32 store_offset = 12;
*/
public boolean hasStoreOffset() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional uint32 store_offset = 12;
*/
public int getStoreOffset() {
return storeOffset_;
}
// optional bool load_column_families_on_demand = 13;
public static final int LOAD_COLUMN_FAMILIES_ON_DEMAND_FIELD_NUMBER = 13;
private boolean loadColumnFamiliesOnDemand_;
/**
 * optional bool load_column_families_on_demand = 13;
 *
 *
 * DO NOT add defaults to load_column_families_on_demand.
 *
 */
public boolean hasLoadColumnFamiliesOnDemand() {
  return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
 * optional bool load_column_families_on_demand = 13;
 *
 *
 * DO NOT add defaults to load_column_families_on_demand.
 *
 */
public boolean getLoadColumnFamiliesOnDemand() {
  return loadColumnFamiliesOnDemand_;
}
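// Sketch: because load_column_families_on_demand deliberately carries no
// proto default (per the field comment above), callers should gate on the
// has...() check instead of trusting the getter's implicit false.
// 'serverDefault' is a hypothetical fallback supplied by the caller.
private static boolean exampleLoadCfOnDemand(
    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan,
    boolean serverDefault) {
  return scan.hasLoadColumnFamiliesOnDemand()
      ? scan.getLoadColumnFamiliesOnDemand()
      : serverDefault;
}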
// optional bool small = 14 [deprecated = true];
public static final int SMALL_FIELD_NUMBER = 14;
private boolean small_;
/**
* optional bool small = 14 [deprecated = true];
*/
@java.lang.Deprecated public boolean hasSmall() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* optional bool small = 14 [deprecated = true];
*/
@java.lang.Deprecated public boolean getSmall() {
return small_;
}
// optional bool reversed = 15 [default = false];
public static final int REVERSED_FIELD_NUMBER = 15;
private boolean reversed_;
/**
* optional bool reversed = 15 [default = false];
*/
public boolean hasReversed() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* optional bool reversed = 15 [default = false];
*/
public boolean getReversed() {
return reversed_;
}
// optional .hbase.pb.Consistency consistency = 16 [default = STRONG];
public static final int CONSISTENCY_FIELD_NUMBER = 16;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency consistency_;
/**
* optional .hbase.pb.Consistency consistency = 16 [default = STRONG];
*/
public boolean hasConsistency() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* optional .hbase.pb.Consistency consistency = 16 [default = STRONG];
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency getConsistency() {
return consistency_;
}
// optional uint32 caching = 17;
public static final int CACHING_FIELD_NUMBER = 17;
private int caching_;
/**
* optional uint32 caching = 17;
*/
public boolean hasCaching() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
* optional uint32 caching = 17;
*/
public int getCaching() {
return caching_;
}
// optional bool allow_partial_results = 18;
public static final int ALLOW_PARTIAL_RESULTS_FIELD_NUMBER = 18;
private boolean allowPartialResults_;
/**
* optional bool allow_partial_results = 18;
*/
public boolean hasAllowPartialResults() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* optional bool allow_partial_results = 18;
*/
public boolean getAllowPartialResults() {
return allowPartialResults_;
}
// repeated .hbase.pb.ColumnFamilyTimeRange cf_time_range = 19;
public static final int CF_TIME_RANGE_FIELD_NUMBER = 19;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange> cfTimeRange_;
/**
* repeated .hbase.pb.ColumnFamilyTimeRange cf_time_range = 19;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange> getCfTimeRangeList() {
return cfTimeRange_;
}
/**
* repeated .hbase.pb.ColumnFamilyTimeRange cf_time_range = 19;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder>
getCfTimeRangeOrBuilderList() {
return cfTimeRange_;
}
/**
* repeated .hbase.pb.ColumnFamilyTimeRange cf_time_range = 19;
*/
public int getCfTimeRangeCount() {
return cfTimeRange_.size();
}
/**
* repeated .hbase.pb.ColumnFamilyTimeRange cf_time_range = 19;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange getCfTimeRange(int index) {
return cfTimeRange_.get(index);
}
/**
* repeated .hbase.pb.ColumnFamilyTimeRange cf_time_range = 19;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder getCfTimeRangeOrBuilder(
int index) {
return cfTimeRange_.get(index);
}
// optional uint64 mvcc_read_point = 20 [default = 0];
public static final int MVCC_READ_POINT_FIELD_NUMBER = 20;
private long mvccReadPoint_;
/**
* optional uint64 mvcc_read_point = 20 [default = 0];
*/
public boolean hasMvccReadPoint() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* optional uint64 mvcc_read_point = 20 [default = 0];
*/
public long getMvccReadPoint() {
return mvccReadPoint_;
}
// optional bool include_start_row = 21 [default = true];
public static final int INCLUDE_START_ROW_FIELD_NUMBER = 21;
private boolean includeStartRow_;
/**
* optional bool include_start_row = 21 [default = true];
*/
public boolean hasIncludeStartRow() {
return ((bitField0_ & 0x00020000) == 0x00020000);
}
/**
* optional bool include_start_row = 21 [default = true];
*/
public boolean getIncludeStartRow() {
return includeStartRow_;
}
// optional bool include_stop_row = 22 [default = false];
public static final int INCLUDE_STOP_ROW_FIELD_NUMBER = 22;
private boolean includeStopRow_;
/**
* optional bool include_stop_row = 22 [default = false];
*/
public boolean hasIncludeStopRow() {
return ((bitField0_ & 0x00040000) == 0x00040000);
}
/**
* optional bool include_stop_row = 22 [default = false];
*/
public boolean getIncludeStopRow() {
return includeStopRow_;
}
// optional .hbase.pb.Scan.ReadType readType = 23 [default = DEFAULT];
public static final int READTYPE_FIELD_NUMBER = 23;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType readType_;
/**
* optional .hbase.pb.Scan.ReadType readType = 23 [default = DEFAULT];
*/
public boolean hasReadType() {
return ((bitField0_ & 0x00080000) == 0x00080000);
}
/**
* optional .hbase.pb.Scan.ReadType readType = 23 [default = DEFAULT];
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType getReadType() {
return readType_;
}
// optional bool need_cursor_result = 24 [default = false];
public static final int NEED_CURSOR_RESULT_FIELD_NUMBER = 24;
private boolean needCursorResult_;
/**
* optional bool need_cursor_result = 24 [default = false];
*/
public boolean hasNeedCursorResult() {
return ((bitField0_ & 0x00100000) == 0x00100000);
}
/**
* optional bool need_cursor_result = 24 [default = false];
*/
public boolean getNeedCursorResult() {
return needCursorResult_;
}
private void initFields() {
column_ = java.util.Collections.emptyList();
attribute_ = java.util.Collections.emptyList();
startRow_ = com.google.protobuf.ByteString.EMPTY;
stopRow_ = com.google.protobuf.ByteString.EMPTY;
filter_ = org.apache.hadoop.hbase.protobuf.generated.FilterProtos.Filter.getDefaultInstance();
timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
maxVersions_ = 1;
cacheBlocks_ = true;
batchSize_ = 0;
maxResultSize_ = 0L;
storeLimit_ = 0;
storeOffset_ = 0;
loadColumnFamiliesOnDemand_ = false;
small_ = false;
reversed_ = false;
consistency_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Consistency.STRONG;
caching_ = 0;
allowPartialResults_ = false;
cfTimeRange_ = java.util.Collections.emptyList();
mvccReadPoint_ = 0L;
includeStartRow_ = true;
includeStopRow_ = false;
readType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.ReadType.DEFAULT;
needCursorResult_ = false;
}
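// Sketch of the proto defaults wired up in initFields() above: a Scan
// with nothing set reads as the half-open range [startRow, stopRow)
// (include_start_row=true, include_stop_row=false), one version per
// column, block cache enabled.
private static boolean exampleScanDefaultsHold() {
  org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan =
      org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
  return scan.getMaxVersions() == 1
      && scan.getCacheBlocks()
      && scan.getIncludeStartRow()
      && !scan.getIncludeStopRow();
}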
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getColumnCount(); i++) {
if (!getColumn(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getAttributeCount(); i++) {
if (!getAttribute(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasFilter()) {
if (!getFilter().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getCfTimeRangeCount(); i++) {
if (!getCfTimeRange(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < column_.size(); i++) {
output.writeMessage(1, column_.get(i));
}
for (int i = 0; i < attribute_.size(); i++) {
output.writeMessage(2, attribute_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(3, startRow_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBytes(4, stopRow_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(5, filter_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(6, timeRange_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt32(7, maxVersions_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBool(8, cacheBlocks_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt32(9, batchSize_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(10, maxResultSize_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeUInt32(11, storeLimit_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt32(12, storeOffset_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeBool(13, loadColumnFamiliesOnDemand_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeBool(14, small_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeBool(15, reversed_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
output.writeEnum(16, consistency_.getNumber());
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeUInt32(17, caching_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeBool(18, allowPartialResults_);
}
for (int i = 0; i < cfTimeRange_.size(); i++) {
output.writeMessage(19, cfTimeRange_.get(i));
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeUInt64(20, mvccReadPoint_);
}
if (((bitField0_ & 0x00020000) == 0x00020000)) {
output.writeBool(21, includeStartRow_);
}
if (((bitField0_ & 0x00040000) == 0x00040000)) {
output.writeBool(22, includeStopRow_);
}
if (((bitField0_ & 0x00080000) == 0x00080000)) {
output.writeEnum(23, readType_.getNumber());
}
if (((bitField0_ & 0x00100000) == 0x00100000)) {
output.writeBool(24, needCursorResult_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
for (int i = 0; i < column_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, column_.get(i));
}
for (int i = 0; i < attribute_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, attribute_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, startRow_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(4, stopRow_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, filter_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, timeRange_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, maxVersions_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, cacheBlocks_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(9, batchSize_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(10, maxResultSize_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(11, storeLimit_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(12, storeOffset_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(13, loadColumnFamiliesOnDemand_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(14, small_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(15, reversed_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(16, consistency_.getNumber());
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(17, caching_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(18, allowPartialResults_);
}
for (int i = 0; i < cfTimeRange_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(19, cfTimeRange_.get(i));
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(20, mvccReadPoint_);
}
if (((bitField0_ & 0x00020000) == 0x00020000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(21, includeStartRow_);
}
if (((bitField0_ & 0x00040000) == 0x00040000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(22, includeStopRow_);
}
if (((bitField0_ & 0x00080000) == 0x00080000)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(23, readType_.getNumber());
}
if (((bitField0_ & 0x00100000) == 0x00100000)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(24, needCursorResult_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan) obj;
boolean result = true;
result = result && getColumnList()
.equals(other.getColumnList());
result = result && getAttributeList()
.equals(other.getAttributeList());
result = result && (hasStartRow() == other.hasStartRow());
if (hasStartRow()) {
result = result && getStartRow()
.equals(other.getStartRow());
}
result = result && (hasStopRow() == other.hasStopRow());
if (hasStopRow()) {
result = result && getStopRow()
.equals(other.getStopRow());
}
result = result && (hasFilter() == other.hasFilter());
if (hasFilter()) {
result = result && getFilter()
.equals(other.getFilter());
}
result = result && (hasTimeRange() == other.hasTimeRange());
if (hasTimeRange()) {
result = result && getTimeRange()
.equals(other.getTimeRange());
}
result = result && (hasMaxVersions() == other.hasMaxVersions());
if (hasMaxVersions()) {
result = result && (getMaxVersions()
== other.getMaxVersions());
}
result = result && (hasCacheBlocks() == other.hasCacheBlocks());
if (hasCacheBlocks()) {
result = result && (getCacheBlocks()
== other.getCacheBlocks());
}
result = result && (hasBatchSize() == other.hasBatchSize());
if (hasBatchSize()) {
result = result && (getBatchSize()
== other.getBatchSize());
}
result = result && (hasMaxResultSize() == other.hasMaxResultSize());
if (hasMaxResultSize()) {
result = result && (getMaxResultSize()
== other.getMaxResultSize());
}
result = result && (hasStoreLimit() == other.hasStoreLimit());
if (hasStoreLimit()) {
result = result && (getStoreLimit()
== other.getStoreLimit());
}
result = result && (hasStoreOffset() == other.hasStoreOffset());
if (hasStoreOffset()) {
result = result && (getStoreOffset()
== other.getStoreOffset());
}
result = result && (hasLoadColumnFamiliesOnDemand() == other.hasLoadColumnFamiliesOnDemand());
if (hasLoadColumnFamiliesOnDemand()) {
result = result && (getLoadColumnFamiliesOnDemand()
== other.getLoadColumnFamiliesOnDemand());
}
result = result && (hasSmall() == other.hasSmall());
if (hasSmall()) {
result = result && (getSmall()
== other.getSmall());
}
result = result && (hasReversed() == other.hasReversed());
if (hasReversed()) {
result = result && (getReversed()
== other.getReversed());
}
result = result && (hasConsistency() == other.hasConsistency());
if (hasConsistency()) {
result = result &&
(getConsistency() == other.getConsistency());
}
result = result && (hasCaching() == other.hasCaching());
if (hasCaching()) {
result = result && (getCaching()
== other.getCaching());
}
result = result && (hasAllowPartialResults() == other.hasAllowPartialResults());
if (hasAllowPartialResults()) {
result = result && (getAllowPartialResults()
== other.getAllowPartialResults());
}
result = result && getCfTimeRangeList()
.equals(other.getCfTimeRangeList());
result = result && (hasMvccReadPoint() == other.hasMvccReadPoint());
if (hasMvccReadPoint()) {
result = result && (getMvccReadPoint()
== other.getMvccReadPoint());
}
result = result && (hasIncludeStartRow() == other.hasIncludeStartRow());
if (hasIncludeStartRow()) {
result = result && (getIncludeStartRow()
== other.getIncludeStartRow());
}
result = result && (hasIncludeStopRow() == other.hasIncludeStopRow());
if (hasIncludeStopRow()) {
result = result && (getIncludeStopRow()
== other.getIncludeStopRow());
}
result = result && (hasReadType() == other.hasReadType());
if (hasReadType()) {
result = result &&
(getReadType() == other.getReadType());
}
result = result && (hasNeedCursorResult() == other.hasNeedCursorResult());
if (hasNeedCursorResult()) {
result = result && (getNeedCursorResult()
== other.getNeedCursorResult());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getColumnCount() > 0) {
hash = (37 * hash) + COLUMN_FIELD_NUMBER;
hash = (53 * hash) + getColumnList().hashCode();
}
if (getAttributeCount() > 0) {
hash = (37 * hash) + ATTRIBUTE_FIELD_NUMBER;
hash = (53 * hash) + getAttributeList().hashCode();
}
if (hasStartRow()) {
hash = (37 * hash) + START_ROW_FIELD_NUMBER;
hash = (53 * hash) + getStartRow().hashCode();
}
if (hasStopRow()) {
hash = (37 * hash) + STOP_ROW_FIELD_NUMBER;
hash = (53 * hash) + getStopRow().hashCode();
}
if (hasFilter()) {
hash = (37 * hash) + FILTER_FIELD_NUMBER;
hash = (53 * hash) + getFilter().hashCode();
}
if (hasTimeRange()) {
hash = (37 * hash) + TIME_RANGE_FIELD_NUMBER;
hash = (53 * hash) + getTimeRange().hashCode();
}
if (hasMaxVersions()) {
hash = (37 * hash) + MAX_VERSIONS_FIELD_NUMBER;
hash = (53 * hash) + getMaxVersions();
}
if (hasCacheBlocks()) {
hash = (37 * hash) + CACHE_BLOCKS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getCacheBlocks());
}
if (hasBatchSize()) {
hash = (37 * hash) + BATCH_SIZE_FIELD_NUMBER;
hash = (53 * hash) + getBatchSize();
}
if (hasMaxResultSize()) {
hash = (37 * hash) + MAX_RESULT_SIZE_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMaxResultSize());
}
if (hasStoreLimit()) {
hash = (37 * hash) + STORE_LIMIT_FIELD_NUMBER;
hash = (53 * hash) + getStoreLimit();
}
if (hasStoreOffset()) {
hash = (37 * hash) + STORE_OFFSET_FIELD_NUMBER;
hash = (53 * hash) + getStoreOffset();
}
if (hasLoadColumnFamiliesOnDemand()) {
hash = (37 * hash) + LOAD_COLUMN_FAMILIES_ON_DEMAND_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getLoadColumnFamiliesOnDemand());
}
if (hasSmall()) {
hash = (37 * hash) + SMALL_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getSmall());
}
if (hasReversed()) {
hash = (37 * hash) + REVERSED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getReversed());
}
if (hasConsistency()) {
hash = (37 * hash) + CONSISTENCY_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getConsistency());
}
if (hasCaching()) {
hash = (37 * hash) + CACHING_FIELD_NUMBER;
hash = (53 * hash) + getCaching();
}
if (hasAllowPartialResults()) {
hash = (37 * hash) + ALLOW_PARTIAL_RESULTS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getAllowPartialResults());
}
if (getCfTimeRangeCount() > 0) {
hash = (37 * hash) + CF_TIME_RANGE_FIELD_NUMBER;
hash = (53 * hash) + getCfTimeRangeList().hashCode();
}
if (hasMvccReadPoint()) {
hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMvccReadPoint());
}
if (hasIncludeStartRow()) {
hash = (37 * hash) + INCLUDE_START_ROW_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIncludeStartRow());
}
if (hasIncludeStopRow()) {
hash = (37 * hash) + INCLUDE_STOP_ROW_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIncludeStopRow());
}
if (hasReadType()) {
hash = (37 * hash) + READTYPE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getReadType());
}
if (hasNeedCursorResult()) {
hash = (37 * hash) + NEED_CURSOR_RESULT_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getNeedCursorResult());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hbase.pb.Scan}
 *
 *
 **
 * Instead of get from a table, you can scan it with optional filters.
 * You can specify the row key range, time range, the columns/families
 * to scan and so on.
 *
 * This scan is used the first time in a scan request. The response of
 * the initial scan will return a scanner id, which should be used to
 * fetch result batches later on before it is closed.
 *
 */
// optional uint32 limit_of_rows = 11 [default = 0];
/**
 * optional uint32 limit_of_rows = 11 [default = 0];
 *
 *
 * if we have returned limit_of_rows rows to client, then close the scanner.
 *
 */
int getLimitOfRows();
}
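// A client-side usage sketch, not part of the generated file: how the
// ScanRequest message documented just below is typically driven. The first
// request carries the Scan; follow-up requests reuse the scanner id returned
// by the server; a final request asks the scanner to be closed. The builder
// methods used here (setScan, setScannerId, setNumberOfRows, setCloseScanner)
// are assumed to be the standard generated setters; the Scan value is built
// elsewhere.
private static ScanRequest openScanSketch(Scan scan, int batchRows) {
  return ScanRequest.newBuilder()
      .setScan(scan)                 // only the first request carries the Scan
      .setNumberOfRows(batchRows)    // rows to return in this batch
      .build();
}
private static ScanRequest nextBatchSketch(long scannerId, int batchRows) {
  return ScanRequest.newBuilder()
      .setScannerId(scannerId)       // id returned by the initial ScanResponse
      .setNumberOfRows(batchRows)
      .build();
}
private static ScanRequest closeScannerSketch(long scannerId) {
  return ScanRequest.newBuilder()
      .setScannerId(scannerId)
      .setCloseScanner(true)         // release the server-side scanner
      .build();
}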
/**
* Protobuf type {@code hbase.pb.ScanRequest}
*
*
**
* A scan request. Initially, it should specify a scan. Later on, you
* can use the scanner id returned to fetch result batches with a different
* scan request.
*
* The scanner will remain open if there are more results, and it's not
* asked to be closed explicitly.
*
* You can fetch the results and ask the scanner to be closed to save
* a trip if you are not interested in remaining results.
*
*/
@javax.annotation.Generated("proto") public static final class ScanRequest extends
com.google.protobuf.GeneratedMessage
implements ScanRequestOrBuilder {
// Use ScanRequest.newBuilder() to construct.
private ScanRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ScanRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ScanRequest defaultInstance;
public static ScanRequest getDefaultInstance() {
return defaultInstance;
}
public ScanRequest getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ScanRequest(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = region_.toBuilder();
}
region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(region_);
region_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = scan_.toBuilder();
}
scan_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(scan_);
scan_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 24: {
bitField0_ |= 0x00000004;
scannerId_ = input.readUInt64();
break;
}
case 32: {
bitField0_ |= 0x00000008;
numberOfRows_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
closeScanner_ = input.readBool();
break;
}
case 48: {
bitField0_ |= 0x00000020;
nextCallSeq_ = input.readUInt64();
break;
}
case 56: {
bitField0_ |= 0x00000040;
clientHandlesPartials_ = input.readBool();
break;
}
case 64: {
bitField0_ |= 0x00000080;
clientHandlesHeartbeats_ = input.readBool();
break;
}
case 72: {
bitField0_ |= 0x00000100;
trackScanMetrics_ = input.readBool();
break;
}
case 80: {
bitField0_ |= 0x00000200;
renew_ = input.readBool();
break;
}
case 88: {
bitField0_ |= 0x00000400;
limitOfRows_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_ScanRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_ScanRequest_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest.Builder.class);
}
public static com.google.protobuf.Parser<ScanRequest> PARSER =
new com.google.protobuf.AbstractParser<ScanRequest>() {
public ScanRequest parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ScanRequest(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ScanRequest> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional .hbase.pb.RegionSpecifier region = 1;
public static final int REGION_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_;
/**
* optional .hbase.pb.RegionSpecifier region = 1;
*/
public boolean hasRegion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
return region_;
}
/**
* optional .hbase.pb.RegionSpecifier region = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
return region_;
}
// optional .hbase.pb.Scan scan = 2;
public static final int SCAN_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan scan_;
/**
* optional .hbase.pb.Scan scan = 2;
*/
public boolean hasScan() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional .hbase.pb.Scan scan = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan() {
return scan_;
}
/**
* optional .hbase.pb.Scan scan = 2;
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder getScanOrBuilder() {
return scan_;
}
// optional uint64 scanner_id = 3;
public static final int SCANNER_ID_FIELD_NUMBER = 3;
private long scannerId_;
/**
* optional uint64 scanner_id = 3;
*/
public boolean hasScannerId() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint64 scanner_id = 3;
*/
public long getScannerId() {
return scannerId_;
}
// optional uint32 number_of_rows = 4;
public static final int NUMBER_OF_ROWS_FIELD_NUMBER = 4;
private int numberOfRows_;
/**
* optional uint32 number_of_rows = 4;
*/
public boolean hasNumberOfRows() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional uint32 number_of_rows = 4;
*/
public int getNumberOfRows() {
return numberOfRows_;
}
// optional bool close_scanner = 5;
public static final int CLOSE_SCANNER_FIELD_NUMBER = 5;
private boolean closeScanner_;
/**
* optional bool close_scanner = 5;
*/
public boolean hasCloseScanner() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* optional bool close_scanner = 5;
*/
public boolean getCloseScanner() {
return closeScanner_;
}
// optional uint64 next_call_seq = 6;
public static final int NEXT_CALL_SEQ_FIELD_NUMBER = 6;
private long nextCallSeq_;
/**
* optional uint64 next_call_seq = 6;
*/
public boolean hasNextCallSeq() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* optional uint64 next_call_seq = 6;
*/
public long getNextCallSeq() {
return nextCallSeq_;
}
// optional bool client_handles_partials = 7;
public static final int CLIENT_HANDLES_PARTIALS_FIELD_NUMBER = 7;
private boolean clientHandlesPartials_;
/**
* optional bool client_handles_partials = 7;
*/
public boolean hasClientHandlesPartials() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* optional bool client_handles_partials = 7;
*/
public boolean getClientHandlesPartials() {
return clientHandlesPartials_;
}
// optional bool client_handles_heartbeats = 8;
public static final int CLIENT_HANDLES_HEARTBEATS_FIELD_NUMBER = 8;
private boolean clientHandlesHeartbeats_;
/**
* optional bool client_handles_heartbeats = 8;
*/
public boolean hasClientHandlesHeartbeats() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* optional bool client_handles_heartbeats = 8;
*/
public boolean getClientHandlesHeartbeats() {
return clientHandlesHeartbeats_;
}
// optional bool track_scan_metrics = 9;
public static final int TRACK_SCAN_METRICS_FIELD_NUMBER = 9;
private boolean trackScanMetrics_;
/**
* optional bool track_scan_metrics = 9;
*/
public boolean hasTrackScanMetrics() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* optional bool track_scan_metrics = 9;
*/
public boolean getTrackScanMetrics() {
return trackScanMetrics_;
}
// optional bool renew = 10 [default = false];
public static final int RENEW_FIELD_NUMBER = 10;
private boolean renew_;
/**
* optional bool renew = 10 [default = false];
*/
public boolean hasRenew() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* optional bool renew = 10 [default = false];
*/
public boolean getRenew() {
return renew_;
}
// optional uint32 limit_of_rows = 11 [default = 0];
public static final int LIMIT_OF_ROWS_FIELD_NUMBER = 11;
private int limitOfRows_;
/**
 * optional uint32 limit_of_rows = 11 [default = 0];
 *
 *
 * if we have returned limit_of_rows rows to client, then close the scanner.
 *
 */
public int getLimitOfRows() {
return limitOfRows_;
}
private void initFields() {
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
scannerId_ = 0L;
numberOfRows_ = 0;
closeScanner_ = false;
nextCallSeq_ = 0L;
clientHandlesPartials_ = false;
clientHandlesHeartbeats_ = false;
trackScanMetrics_ = false;
renew_ = false;
limitOfRows_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasRegion()) {
if (!getRegion().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasScan()) {
if (!getScan().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, region_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, scan_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, scannerId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, numberOfRows_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBool(5, closeScanner_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(6, nextCallSeq_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeBool(7, clientHandlesPartials_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeBool(8, clientHandlesHeartbeats_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeBool(9, trackScanMetrics_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeBool(10, renew_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt32(11, limitOfRows_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, region_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, scan_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, scannerId_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, numberOfRows_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(5, closeScanner_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(6, nextCallSeq_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(7, clientHandlesPartials_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, clientHandlesHeartbeats_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(9, trackScanMetrics_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(10, renew_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(11, limitOfRows_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest) obj;
boolean result = true;
result = result && (hasRegion() == other.hasRegion());
if (hasRegion()) {
result = result && getRegion()
.equals(other.getRegion());
}
result = result && (hasScan() == other.hasScan());
if (hasScan()) {
result = result && getScan()
.equals(other.getScan());
}
result = result && (hasScannerId() == other.hasScannerId());
if (hasScannerId()) {
result = result && (getScannerId()
== other.getScannerId());
}
result = result && (hasNumberOfRows() == other.hasNumberOfRows());
if (hasNumberOfRows()) {
result = result && (getNumberOfRows()
== other.getNumberOfRows());
}
result = result && (hasCloseScanner() == other.hasCloseScanner());
if (hasCloseScanner()) {
result = result && (getCloseScanner()
== other.getCloseScanner());
}
result = result && (hasNextCallSeq() == other.hasNextCallSeq());
if (hasNextCallSeq()) {
result = result && (getNextCallSeq()
== other.getNextCallSeq());
}
result = result && (hasClientHandlesPartials() == other.hasClientHandlesPartials());
if (hasClientHandlesPartials()) {
result = result && (getClientHandlesPartials()
== other.getClientHandlesPartials());
}
result = result && (hasClientHandlesHeartbeats() == other.hasClientHandlesHeartbeats());
if (hasClientHandlesHeartbeats()) {
result = result && (getClientHandlesHeartbeats()
== other.getClientHandlesHeartbeats());
}
result = result && (hasTrackScanMetrics() == other.hasTrackScanMetrics());
if (hasTrackScanMetrics()) {
result = result && (getTrackScanMetrics()
== other.getTrackScanMetrics());
}
result = result && (hasRenew() == other.hasRenew());
if (hasRenew()) {
result = result && (getRenew()
== other.getRenew());
}
result = result && (hasLimitOfRows() == other.hasLimitOfRows());
if (hasLimitOfRows()) {
result = result && (getLimitOfRows()
== other.getLimitOfRows());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegion()) {
hash = (37 * hash) + REGION_FIELD_NUMBER;
hash = (53 * hash) + getRegion().hashCode();
}
if (hasScan()) {
hash = (37 * hash) + SCAN_FIELD_NUMBER;
hash = (53 * hash) + getScan().hashCode();
}
if (hasScannerId()) {
hash = (37 * hash) + SCANNER_ID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getScannerId());
}
if (hasNumberOfRows()) {
hash = (37 * hash) + NUMBER_OF_ROWS_FIELD_NUMBER;
hash = (53 * hash) + getNumberOfRows();
}
if (hasCloseScanner()) {
hash = (37 * hash) + CLOSE_SCANNER_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getCloseScanner());
}
if (hasNextCallSeq()) {
hash = (37 * hash) + NEXT_CALL_SEQ_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNextCallSeq());
}
if (hasClientHandlesPartials()) {
hash = (37 * hash) + CLIENT_HANDLES_PARTIALS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getClientHandlesPartials());
}
if (hasClientHandlesHeartbeats()) {
hash = (37 * hash) + CLIENT_HANDLES_HEARTBEATS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getClientHandlesHeartbeats());
}
if (hasTrackScanMetrics()) {
hash = (37 * hash) + TRACK_SCAN_METRICS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getTrackScanMetrics());
}
if (hasRenew()) {
hash = (37 * hash) + RENEW_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getRenew());
}
if (hasLimitOfRows()) {
hash = (37 * hash) + LIMIT_OF_ROWS_FIELD_NUMBER;
hash = (53 * hash) + getLimitOfRows();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
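// Usage sketch, not part of the generated file: because messages are
// immutable, a variant of an existing request is made through toBuilder()
// rather than mutation. setCloseScanner is assumed to be the standard
// generated builder setter for the close_scanner field.
private static ScanRequest withCloseSketch(ScanRequest original) {
  return original.toBuilder()
      .setCloseScanner(true)  // copy all fields, then flip close_scanner
      .build();
}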
/**
 * Protobuf type {@code hbase.pb.ScanRequest}
 *
 *
 **
 * A scan request. Initially, it should specify a scan. Later on, you
 * can use the scanner id returned to fetch result batches with a different
 * scan request.
 *
 * The scanner will remain open if there are more results, and it's not
 * asked to be closed explicitly.
 *
 * You can fetch the results and ask the scanner to be closed to save
 * a trip if you are not interested in remaining results.
 *
 */
/**
 * Protobuf type {@code hbase.pb.Cursor}
 *
 *
 **
 * Scan cursor to tell client where we are scanning.
 *
 */
@javax.annotation.Generated("proto") public static final class Cursor extends
com.google.protobuf.GeneratedMessage
implements CursorOrBuilder {
// Use Cursor.newBuilder() to construct.
private Cursor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Cursor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Cursor defaultInstance;
public static Cursor getDefaultInstance() {
return defaultInstance;
}
public Cursor getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Cursor(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
row_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder.class);
}
public static com.google.protobuf.Parser<Cursor> PARSER =
new com.google.protobuf.AbstractParser<Cursor>() {
public Cursor parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Cursor(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Cursor> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional bytes row = 1;
public static final int ROW_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString row_;
/**
* optional bytes row = 1;
*/
public boolean hasRow() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bytes row = 1;
*/
public com.google.protobuf.ByteString getRow() {
return row_;
}
private void initFields() {
row_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, row_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, row_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor) obj;
boolean result = true;
result = result && (hasRow() == other.hasRow());
if (hasRow()) {
result = result && getRow()
.equals(other.getRow());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRow()) {
hash = (37 * hash) + ROW_FIELD_NUMBER;
hash = (53 * hash) + getRow().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.Cursor}
*
*
**
* Scan cursor to tell client where we are scanning.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
row_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_Cursor_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor build() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.row_ = row_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance()) return this;
if (other.hasRow()) {
setRow(other.getRow());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional bytes row = 1;
private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
/**
* optional bytes row = 1;
*/
public boolean hasRow() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bytes row = 1;
*/
public com.google.protobuf.ByteString getRow() {
return row_;
}
/**
* optional bytes row = 1;
*/
public Builder setRow(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
row_ = value;
onChanged();
return this;
}
/**
* optional bytes row = 1;
*/
public Builder clearRow() {
bitField0_ = (bitField0_ & ~0x00000001);
row_ = getDefaultInstance().getRow();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.Cursor)
}
static {
defaultInstance = new Cursor(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.Cursor)
}
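// Usage sketch, not part of the generated file: consuming the cursor that a
// heartbeat response may carry, per the Cursor javadoc above. This assumes
// the ScanResponse accessors getHeartbeatMessage(), hasCursor() and
// getCursor(), which are the standard generated getters for fields 9 and 12
// of the ScanResponse message defined further below.
private static com.google.protobuf.ByteString cursorRowSketch(ScanResponse response) {
  if (response.getHeartbeatMessage() && response.hasCursor()) {
    return response.getCursor().getRow();  // row key the server is positioned at
  }
  return null;  // no cursor requested, or none sent in this response
}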
public interface ScanResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated uint32 cells_per_result = 1;
/**
 * repeated uint32 cells_per_result = 1;
 *
 *
 * This field is filled in if we are doing cellblocks. A cellblock is made up
 * of all Cells serialized out as one cellblock BUT responses from a server
 * have their Cells grouped by Result. So we can reconstitute the
 * Results on the client-side, this field is a list of counts of Cells
 * in each Result that makes up the response. For example, if this field
 * has 3, 3, 3 in it, then we know that on the client, we are to make
 * three Results each of three Cells each.
 *
 */
// repeated .hbase.pb.Result results = 5;
/**
 * repeated .hbase.pb.Result results = 5;
 *
 *
 * If cells are not carried in an accompanying cellblock, then they are pb'd here.
 * This field is mutually exclusive with cells_per_result (since the Cells will
 * be inside the pb'd Result)
 *
 */
// repeated bool partial_flag_per_result = 7;
/**
 * repeated bool partial_flag_per_result = 7;
 *
 *
 * This field is filled in if we are doing cellblocks. In the event that a row
 * could not fit all of its cells into a single RPC chunk, the results will be
 * returned as partials, and reconstructed into a complete result on the client
 * side. This field is a list of flags indicating whether or not the result
 * that the cells belong to is a partial result. For example, if this field
 * has false, false, true in it, then we know that on the client side, we need to
 * make another RPC request since the last result was only a partial.
 *
 */
// optional bool more_results_in_region = 8;
/**
 * optional bool more_results_in_region = 8;
 *
 *
 * A server may choose to limit the number of results returned to the client for
 * reasons such as the size in bytes or quantity of results accumulated. This field
 * will be true when more results exist in the current region.
 *
 */
// optional bool heartbeat_message = 9;
/**
 * optional bool heartbeat_message = 9;
 *
 *
 * This field is filled in if the server is sending back a heartbeat message.
 * Heartbeat messages are sent back to the client to prevent the scanner from
 * timing out. Seeing a heartbeat message communicates to the Client that the
 * server would have continued to scan had the time limit not been reached.
 *
 */
// optional .hbase.pb.ScanMetrics scan_metrics = 10;
/**
 * optional .hbase.pb.ScanMetrics scan_metrics = 10;
 *
 *
 * This field is filled in if the client has requested that scan metrics be tracked.
 * The metrics tracked here are sent back to the client to be tracked together with
 * the existing client side metrics.
 *
 */
// optional uint64 mvcc_read_point = 11 [default = 0];
/**
 * optional uint64 mvcc_read_point = 11 [default = 0];
 *
 *
 * The mvcc read point which is used to open the scanner at server side. Client can
 * make use of this mvcc_read_point when restarting a scanner to get a consistent view
 * of a row.
 *
 */
// optional .hbase.pb.Cursor cursor = 12;
/**
 * optional .hbase.pb.Cursor cursor = 12;
 *
 *
 * If the Scan needs a cursor, return the row key we are scanning in heartbeat message.
 * If the Scan doesn't need a cursor, don't set this field to reduce network IO.
 *
 */
}
/**
 * Protobuf type {@code hbase.pb.ScanResponse}
 *
 *
 **
 * The scan response. If there are no more results, more_results will
 * be false. If it is not specified, it means there are more.
 *
 */
@javax.annotation.Generated("proto") public static final class ScanResponse extends
com.google.protobuf.GeneratedMessage
implements ScanResponseOrBuilder {
// Use ScanResponse.newBuilder() to construct.
private ScanResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ScanResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ScanResponse defaultInstance;
public static ScanResponse getDefaultInstance() {
return defaultInstance;
}
public ScanResponse getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ScanResponse(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
cellsPerResult_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000001;
}
cellsPerResult_.add(input.readUInt32());
break;
}
case 10: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
cellsPerResult_ = new java.util.ArrayList<java.lang.Integer>();
mutable_bitField0_ |= 0x00000001;
}
while (input.getBytesUntilLimit() > 0) {
cellsPerResult_.add(input.readUInt32());
}
input.popLimit(limit);
break;
}
case 16: {
bitField0_ |= 0x00000001;
scannerId_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000002;
moreResults_ = input.readBool();
break;
}
case 32: {
bitField0_ |= 0x00000004;
ttl_ = input.readUInt32();
break;
}
case 42: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
results_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result>();
mutable_bitField0_ |= 0x00000010;
}
results_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.PARSER, extensionRegistry));
break;
}
case 48: {
bitField0_ |= 0x00000008;
stale_ = input.readBool();
break;
}
case 56: {
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
partialFlagPerResult_ = new java.util.ArrayList<java.lang.Boolean>();
mutable_bitField0_ |= 0x00000040;
}
partialFlagPerResult_.add(input.readBool());
break;
}
case 58: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) {
partialFlagPerResult_ = new java.util.ArrayList<java.lang.Boolean>();
mutable_bitField0_ |= 0x00000040;
}
while (input.getBytesUntilLimit() > 0) {
partialFlagPerResult_.add(input.readBool());
}
input.popLimit(limit);
break;
}
case 64: {
bitField0_ |= 0x00000010;
moreResultsInRegion_ = input.readBool();
break;
}
case 72: {
bitField0_ |= 0x00000020;
heartbeatMessage_ = input.readBool();
break;
}
case 82: {
org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics.Builder subBuilder = null;
if (((bitField0_ & 0x00000040) == 0x00000040)) {
subBuilder = scanMetrics_.toBuilder();
}
scanMetrics_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(scanMetrics_);
scanMetrics_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000040;
break;
}
case 88: {
bitField0_ |= 0x00000080;
mvccReadPoint_ = input.readUInt64();
break;
}
case 98: {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder subBuilder = null;
if (((bitField0_ & 0x00000100) == 0x00000100)) {
subBuilder = cursor_.toBuilder();
}
cursor_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(cursor_);
cursor_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000100;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
cellsPerResult_ = java.util.Collections.unmodifiableList(cellsPerResult_);
}
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
results_ = java.util.Collections.unmodifiableList(results_);
}
if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
partialFlagPerResult_ = java.util.Collections.unmodifiableList(partialFlagPerResult_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_ScanResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_hbase_pb_ScanResponse_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.Builder.class);
}
public static com.google.protobuf.Parser<ScanResponse> PARSER =
new com.google.protobuf.AbstractParser<ScanResponse>() {
public ScanResponse parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ScanResponse(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ScanResponse> getParserForType() {
return PARSER;
}
private int bitField0_;
// repeated uint32 cells_per_result = 1;
public static final int CELLS_PER_RESULT_FIELD_NUMBER = 1;
private java.util.List<java.lang.Integer> cellsPerResult_;
/**
 * repeated uint32 cells_per_result = 1;
 *
 *
 * This field is filled in if we are doing cellblocks. A cellblock is made up
 * of all Cells serialized out as one cellblock BUT responses from a server
 * have their Cells grouped by Result. So we can reconstitute the
 * Results on the client-side, this field is a list of counts of Cells
 * in each Result that makes up the response. For example, if this field
 * has 3, 3, 3 in it, then we know that on the client, we are to make
 * three Results each of three Cells each.
 *
 */
public int getCellsPerResultCount() {
return cellsPerResult_.size();
}
/**
* repeated uint32 cells_per_result = 1;
*
*
* This field is filled in if we are doing cellblocks. A cellblock is made up
* of all Cells serialized out as one cellblock BUT responses from a server
* have their Cells grouped by Result. So we can reconstitute the
* Results on the client-side, this field is a list of counts of Cells
* in each Result that makes up the response. For example, if this field
* has 3, 3, 3 in it, then we know that on the client, we are to make
* three Results each of three Cells each.
*
*/
public int getCellsPerResult(int index) {
return cellsPerResult_.get(index);
}
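// Sketch, not part of the generated file, of the reconstitution described in
// the cells_per_result javadoc above: each count says how many Cells of the
// accompanying cellblock belong to one Result, so the client walks the counts
// and slices the decoded cell stream. The decodedCells list of Object is a
// hypothetical stand-in for whatever the cellblock decoder produced.
private static java.util.List<java.util.List<Object>> groupCellsSketch(
    ScanResponse response, java.util.List<Object> decodedCells) {
  java.util.List<java.util.List<Object>> grouped =
      new java.util.ArrayList<java.util.List<Object>>();
  int offset = 0;
  for (int i = 0; i < response.getCellsPerResultCount(); i++) {
    int count = response.getCellsPerResult(i);            // e.g. 3, 3, 3
    grouped.add(decodedCells.subList(offset, offset + count));
    offset += count;
  }
  return grouped;  // one inner list per client-side Result
}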
// optional uint64 scanner_id = 2;
public static final int SCANNER_ID_FIELD_NUMBER = 2;
private long scannerId_;
/**
* optional uint64 scanner_id = 2;
*/
public boolean hasScannerId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional uint64 scanner_id = 2;
*/
public long getScannerId() {
return scannerId_;
}
// optional bool more_results = 3;
public static final int MORE_RESULTS_FIELD_NUMBER = 3;
private boolean moreResults_;
/**
* optional bool more_results = 3;
*/
public boolean hasMoreResults() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool more_results = 3;
*/
public boolean getMoreResults() {
return moreResults_;
}
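// Sketch of the more_results contract from the class javadoc: the scan is
// exhausted only when the field is explicitly present and false; an absent
// field means more results may follow.
private static boolean scanExhaustedSketch(ScanResponse response) {
  return response.hasMoreResults() && !response.getMoreResults();
}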
// optional uint32 ttl = 4;
public static final int TTL_FIELD_NUMBER = 4;
private int ttl_;
/**
* optional uint32 ttl = 4;
*/
public boolean hasTtl() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint32 ttl = 4;
*/
public int getTtl() {
return ttl_;
}
// repeated .hbase.pb.Result results = 5;
public static final int RESULTS_FIELD_NUMBER = 5;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result> results_;
/**
 * repeated .hbase.pb.Result results = 5;
 *
 *
 * If cells are not carried in an accompanying cellblock, then they are pb'd here.
 * This field is mutually exclusive with cells_per_result (since the Cells will
 * be inside the pb'd Result)
 *
 */
public int getResultsCount() {
return results_.size();
}
/**
 * repeated .hbase.pb.Result results = 5;
 *
 *
 * If cells are not carried in an accompanying cellblock, then they are pb'd here.
 * This field is mutually exclusive with cells_per_result (since the Cells will
 * be inside the pb'd Result)
 *
 */
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder(
int index) {
return results_.get(index);
}
// optional bool stale = 6;
public static final int STALE_FIELD_NUMBER = 6;
private boolean stale_;
/**
* optional bool stale = 6;
*/
public boolean hasStale() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* optional bool stale = 6;
*/
public boolean getStale() {
return stale_;
}
// repeated bool partial_flag_per_result = 7;
public static final int PARTIAL_FLAG_PER_RESULT_FIELD_NUMBER = 7;
private java.util.List<java.lang.Boolean> partialFlagPerResult_;
/**
* repeated bool partial_flag_per_result = 7;
*
*
* This field is filled in if we are doing cellblocks. In the event that a row
* could not fit all of its cells into a single RPC chunk, the results will be
* returned as partials, and reconstructed into a complete result on the client
* side. This field is a list of flags indicating whether or not the result
* that the cells belong to is a partial result. For example, if this field
* has false, false, true in it, then we know that on the client side, we need to
* make another RPC request since the last result was only a partial.
*
*/
public int getPartialFlagPerResultCount() {
return partialFlagPerResult_.size();
}
/**
* repeated bool partial_flag_per_result = 7;
*
*
* This field is filled in if we are doing cellblocks. In the event that a row
* could not fit all of its cells into a single RPC chunk, the results will be
* returned as partials, and reconstructed into a complete result on the client
* side. This field is a list of flags indicating whether or not the result
* that the cells belong to is a partial result. For example, if this field
* has false, false, true in it, then we know that on the client side, we need to
* make another RPC request since the last result was only a partial.
*
*/
public boolean getPartialFlagPerResult(int index) {
return partialFlagPerResult_.get(index);
}
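// Illustrative sketch, not generated code: partial_flag_per_result lines
// up index-for-index with the Results in this response, so a client can
// tell whether the trailing Result must be merged with the first Result
// of the next RPC. The method name is hypothetical.
private static boolean sketchLastResultIsPartial(
    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse response) {
  int n = response.getPartialFlagPerResultCount();
  // e.g. flags [false, false, true] mean the last row needs another RPC
  return n > 0 && response.getPartialFlagPerResult(n - 1);
}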
// optional bool more_results_in_region = 8;
public static final int MORE_RESULTS_IN_REGION_FIELD_NUMBER = 8;
private boolean moreResultsInRegion_;
/**
* optional bool more_results_in_region = 8;
*
*
* A server may choose to limit the number of results returned to the client for
* reasons such as the size in bytes or quantity of results accumulated. This field
* will be true when more results exist in the current region.
*
*/
public boolean getMoreResultsInRegion() {
return moreResultsInRegion_;
}
// optional bool heartbeat_message = 9;
public static final int HEARTBEAT_MESSAGE_FIELD_NUMBER = 9;
private boolean heartbeatMessage_;
/**
* optional bool heartbeat_message = 9;
*
*
* This field is filled in if the server is sending back a heartbeat message.
* Heartbeat messages are sent back to the client to prevent the scanner from
* timing out. Seeing a heartbeat message communicates to the Client that the
* server would have continued to scan had the time limit not been reached.
*
*/
public boolean getHeartbeatMessage() {
return heartbeatMessage_;
}
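// Illustrative sketch, not generated code: how the flags in this message
// drive a client-side scan loop. issueScan is a hypothetical stand-in
// for the actual scan RPC.
private static void sketchScanLoop(
    java.util.concurrent.Callable<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse> issueScan)
    throws java.lang.Exception {
  while (true) {
    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse response = issueScan.call();
    if (response.getHeartbeatMessage()) {
      continue; // no new rows; the server is only keeping the scanner alive
    }
    // ... consume the Results (or the accompanying cellblock) here ...
    if (response.hasMoreResults() && !response.getMoreResults()) {
      break; // the server has said the whole scan is exhausted
    }
    // Otherwise keep calling: more_results_in_region tells us whether the
    // current region still has rows; when it is false a real client would
    // move its scanner to the next region before reissuing.
  }
}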
// optional .hbase.pb.ScanMetrics scan_metrics = 10;
public static final int SCAN_METRICS_FIELD_NUMBER = 10;
private org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics scanMetrics_;
/**
* optional .hbase.pb.ScanMetrics scan_metrics = 10;
*
*
* This field is filled in if the client has requested that scan metrics be tracked.
* The metrics tracked here are sent back to the client to be tracked together with
* the existing client side metrics.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetricsOrBuilder getScanMetricsOrBuilder() {
return scanMetrics_;
}
// optional uint64 mvcc_read_point = 11 [default = 0];
public static final int MVCC_READ_POINT_FIELD_NUMBER = 11;
private long mvccReadPoint_;
/**
* optional uint64 mvcc_read_point = 11 [default = 0];
*
*
* The mvcc read point which is used to open the scanner at server side. Client can
* make use of this mvcc_read_point when restarting a scanner to get a consistent view
* of a row.
*
*/
public long getMvccReadPoint() {
return mvccReadPoint_;
}
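// Illustrative sketch, not generated code: the read point handed back on
// the first response can be stashed and applied when the scanner has to
// be reopened, so the restarted scan observes the same row versions. The
// Scan-side setter named below is hypothetical.
private static long sketchStashReadPoint(
    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse first) {
  long readPoint = first.getMvccReadPoint();
  // On restart, a real client would apply this to the new Scan, e.g. a
  // hypothetical scanBuilder.setMvccReadPoint(readPoint), before reissuing.
  return readPoint;
}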
// optional .hbase.pb.Cursor cursor = 12;
public static final int CURSOR_FIELD_NUMBER = 12;
private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor cursor_;
/**
* optional .hbase.pb.Cursor cursor = 12;
*
*
* If the Scan needs a cursor, return the row key we are scanning in the heartbeat message.
* If the Scan doesn't need a cursor, don't set this field to reduce network IO.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder getCursorOrBuilder() {
return cursor_;
}
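// Illustrative sketch, not generated code: when a heartbeat carries a
// cursor, the client can surface scan progress without receiving any new
// rows. getRow() on Cursor is assumed here from the field's description.
private static com.google.protobuf.ByteString sketchProgressRow(
    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse response) {
  if (response.getHeartbeatMessage() && response.hasCursor()) {
    // The row key the server was scanning when the heartbeat fired.
    return response.getCursor().getRow();
  }
  return null;
}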
private void initFields() {
cellsPerResult_ = java.util.Collections.emptyList();
scannerId_ = 0L;
moreResults_ = false;
ttl_ = 0;
results_ = java.util.Collections.emptyList();
stale_ = false;
partialFlagPerResult_ = java.util.Collections.emptyList();
moreResultsInRegion_ = false;
heartbeatMessage_ = false;
scanMetrics_ = org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics.getDefaultInstance();
mvccReadPoint_ = 0L;
cursor_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < cellsPerResult_.size(); i++) {
output.writeUInt32(1, cellsPerResult_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(2, scannerId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(3, moreResults_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(4, ttl_);
}
for (int i = 0; i < results_.size(); i++) {
output.writeMessage(5, results_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(6, stale_);
}
for (int i = 0; i < partialFlagPerResult_.size(); i++) {
output.writeBool(7, partialFlagPerResult_.get(i));
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeBool(8, moreResultsInRegion_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBool(9, heartbeatMessage_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeMessage(10, scanMetrics_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(11, mvccReadPoint_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeMessage(12, cursor_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
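// Size = varint payload bytes for each value, plus one tag byte per
// element (field number 1 encodes as a single-byte tag).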
int dataSize = 0;
for (int i = 0; i < cellsPerResult_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt32SizeNoTag(cellsPerResult_.get(i));
}
size += dataSize;
size += 1 * getCellsPerResultList().size();
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, scannerId_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, moreResults_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, ttl_);
}
for (int i = 0; i < results_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, results_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(6, stale_);
}
{
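// Each bool encodes as one payload byte plus a one-byte tag, so payload
// (dataSize) and per-element tag overhead are each size() bytes; this is
// not a double-count.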
int dataSize = 0;
dataSize = 1 * getPartialFlagPerResultList().size();
size += dataSize;
size += 1 * getPartialFlagPerResultList().size();
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(8, moreResultsInRegion_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(9, heartbeatMessage_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, scanMetrics_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, mvccReadPoint_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(12, cursor_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse) obj;
boolean result = true;
result = result && getCellsPerResultList()
.equals(other.getCellsPerResultList());
result = result && (hasScannerId() == other.hasScannerId());
if (hasScannerId()) {
result = result && (getScannerId()
== other.getScannerId());
}
result = result && (hasMoreResults() == other.hasMoreResults());
if (hasMoreResults()) {
result = result && (getMoreResults()
== other.getMoreResults());
}
result = result && (hasTtl() == other.hasTtl());
if (hasTtl()) {
result = result && (getTtl()
== other.getTtl());
}
result = result && getResultsList()
.equals(other.getResultsList());
result = result && (hasStale() == other.hasStale());
if (hasStale()) {
result = result && (getStale()
== other.getStale());
}
result = result && getPartialFlagPerResultList()
.equals(other.getPartialFlagPerResultList());
result = result && (hasMoreResultsInRegion() == other.hasMoreResultsInRegion());
if (hasMoreResultsInRegion()) {
result = result && (getMoreResultsInRegion()
== other.getMoreResultsInRegion());
}
result = result && (hasHeartbeatMessage() == other.hasHeartbeatMessage());
if (hasHeartbeatMessage()) {
result = result && (getHeartbeatMessage()
== other.getHeartbeatMessage());
}
result = result && (hasScanMetrics() == other.hasScanMetrics());
if (hasScanMetrics()) {
result = result && getScanMetrics()
.equals(other.getScanMetrics());
}
result = result && (hasMvccReadPoint() == other.hasMvccReadPoint());
if (hasMvccReadPoint()) {
result = result && (getMvccReadPoint()
== other.getMvccReadPoint());
}
result = result && (hasCursor() == other.hasCursor());
if (hasCursor()) {
result = result && getCursor()
.equals(other.getCursor());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (getCellsPerResultCount() > 0) {
hash = (37 * hash) + CELLS_PER_RESULT_FIELD_NUMBER;
hash = (53 * hash) + getCellsPerResultList().hashCode();
}
if (hasScannerId()) {
hash = (37 * hash) + SCANNER_ID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getScannerId());
}
if (hasMoreResults()) {
hash = (37 * hash) + MORE_RESULTS_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getMoreResults());
}
if (hasTtl()) {
hash = (37 * hash) + TTL_FIELD_NUMBER;
hash = (53 * hash) + getTtl();
}
if (getResultsCount() > 0) {
hash = (37 * hash) + RESULTS_FIELD_NUMBER;
hash = (53 * hash) + getResultsList().hashCode();
}
if (hasStale()) {
hash = (37 * hash) + STALE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getStale());
}
if (getPartialFlagPerResultCount() > 0) {
hash = (37 * hash) + PARTIAL_FLAG_PER_RESULT_FIELD_NUMBER;
hash = (53 * hash) + getPartialFlagPerResultList().hashCode();
}
if (hasMoreResultsInRegion()) {
hash = (37 * hash) + MORE_RESULTS_IN_REGION_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getMoreResultsInRegion());
}
if (hasHeartbeatMessage()) {
hash = (37 * hash) + HEARTBEAT_MESSAGE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getHeartbeatMessage());
}
if (hasScanMetrics()) {
hash = (37 * hash) + SCAN_METRICS_FIELD_NUMBER;
hash = (53 * hash) + getScanMetrics().hashCode();
}
if (hasMvccReadPoint()) {
hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getMvccReadPoint());
}
if (hasCursor()) {
hash = (37 * hash) + CURSOR_FIELD_NUMBER;
hash = (53 * hash) + getCursor().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
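// Illustrative sketch, not generated code: the parseFrom overloads above
// all funnel through PARSER; a typical caller hands in the raw RPC
// payload bytes. The byte[] parameter name is hypothetical.
private static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse sketchParse(
    byte[] rpcPayload) throws com.google.protobuf.InvalidProtocolBufferException {
  return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.parseFrom(rpcPayload);
}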
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.ScanResponse}
*
*
**
* The scan response. If there are no more results, more_results will
* be false. If it is not specified, it means there are more.
*
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponseOrBuilder {
/**
* repeated uint32 cells_per_result = 1;
*
*
* This field is filled in if we are doing cellblocks. A cellblock is made up
* of all Cells serialized out as one cellblock BUT responses from a server
* have their Cells grouped by Result. So we can reconstitute the
* Results on the client-side, this field is a list of counts of Cells
* in each Result that makes up the response. For example, if this field
* has 3, 3, 3 in it, then we know that on the client, we are to make
* three Results of three Cells each.
*
*/
public int getCellsPerResultCount() {
return cellsPerResult_.size();
}
/**
* repeated uint32 cells_per_result = 1;
*
*
* This field is filled in if we are doing cellblocks. A cellblock is made up
* of all Cells serialized out as one cellblock BUT responses from a server
* have their Cells grouped by Result. So we can reconstitute the
* Results on the client-side, this field is a list of counts of Cells
* in each Result that makes up the response. For example, if this field
* has 3, 3, 3 in it, then we know that on the client, we are to make
* three Results of three Cells each.
*
*/
public int getCellsPerResult(int index) {
return cellsPerResult_.get(index);
}
/**
* repeated uint32 cells_per_result = 1;
*
*
* This field is filled in if we are doing cellblocks. A cellblock is made up
* of all Cells serialized out as one cellblock BUT responses from a server
* have their Cells grouped by Result. So we can reconstitute the
* Results on the client-side, this field is a list of counts of Cells
* in each Result that makes up the response. For example, if this field
* has 3, 3, 3 in it, then we know that on the client, we are to make
* three Results of three Cells each.
*
*/
public Builder setCellsPerResult(
int index, int value) {
ensureCellsPerResultIsMutable();
cellsPerResult_.set(index, value);
onChanged();
return this;
}
/**
* repeated .hbase.pb.Result results = 5;
*
*
* If cells are not carried in an accompanying cellblock, then they are pb'd here.
* This field is mutually exclusive with cells_per_result (since the Cells will
* be inside the pb'd Result)
*
*/
public int getResultsCount() {
if (resultsBuilder_ == null) {
return results_.size();
} else {
return resultsBuilder_.getCount();
}
}
/**
* repeated .hbase.pb.Result results = 5;
*
*
* If cells are not carried in an accompanying cellblock, then they are pb'd here.
* This field is mutually exclusive with cells_per_result (since the Cells will
* be inside the pb'd Result)
*
*/
public Builder setResults(
int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) {
if (resultsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureResultsIsMutable();
results_.set(index, value);
onChanged();
} else {
resultsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.Result results = 5;
*
*
* If cells are not carried in an accompanying cellblock, then they are pb'd here.
* This field is mutually exclusive with cells_per_result (since the Cells will
* be inside the pb'd Result)
*
*/
public Builder addResults(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) {
if (resultsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureResultsIsMutable();
results_.add(value);
onChanged();
} else {
resultsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hbase.pb.Result results = 5;
*
*
* If cells are not carried in an accompanying cellblock, then they are pb'd here.
* This field is mutually exclusive with cells_per_result (since the Cells will
* be inside the pb'd Result)
*
*/
public Builder addResults(
int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) {
if (resultsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureResultsIsMutable();
results_.add(index, value);
onChanged();
} else {
resultsBuilder_.addMessage(index, value);
}
return this;
}
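// Illustrative sketch, not generated code: a server-side caller would
// assemble a response through this Builder roughly like so; the values
// are hypothetical and addCellsPerResult/setScannerId follow the
// standard generated accessor pattern.
private static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse sketchBuild() {
  return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.newBuilder()
      .addCellsPerResult(3) // the lone Result of this response has three Cells
      .setScannerId(42L)
      .setMoreResults(true)
      .build();
}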
/**
* repeated .hbase.pb.Result results = 5;
*
*
* If cells are not carried in an accompanying cellblock, then they are pb'd here.
* This field is mutually exclusive with cells_per_result (since the Cells will
* be inside the pb'd Result)
*
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder getResultsBuilder(
int index) {
return getResultsFieldBuilder().getBuilder(index);
}
/**
* repeated .hbase.pb.Result results = 5;
*
*
* If cells are not carried in an accompanying cellblock, then they are pb'd here.
* This field is mutually exclusive with cells_per_result (since the Cells will
* be inside the pb'd Result)
*
*/
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder(
int index) {
if (resultsBuilder_ == null) {
return results_.get(index);
} else {
return resultsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated bool partial_flag_per_result = 7;
*
*
* This field is filled in if we are doing cellblocks. In the event that a row
* could not fit all of its cells into a single RPC chunk, the results will be
* returned as partials, and reconstructed into a complete result on the client
* side. This field is a list of flags indicating whether or not the result
* that the cells belong to is a partial result. For example, if this field
* has false, false, true in it, then we know that on the client side, we need to
* make another RPC request since the last result was only a partial.
*
*/
public int getPartialFlagPerResultCount() {
return partialFlagPerResult_.size();
}
/**
* optional uint64 mvcc_read_point = 11 [default = 0];
*
*
* The mvcc read point which is used to open the scanner at server side. Client can
* make use of this mvcc_read_point when restarting a scanner to get a consistent view
* of a row.
*
*/
public long getMvccReadPoint() {
return mvccReadPoint_;
}
/**
* optional uint64 mvcc_read_point = 11 [default = 0];
*
*
* The mvcc read point which is used to open the scanner at server side. Client can
* make use of this mvcc_read_point when restarting a scanner to get a consistent view
* of a row.
*
*/
public Builder setMvccReadPoint(long value) {
bitField0_ |= 0x00000400;
mvccReadPoint_ = value;
onChanged();
return this;
}
/**
* optional .hbase.pb.Cursor cursor = 12;
*
*
* If the Scan needs a cursor, return the row key we are scanning in the heartbeat message.
* If the Scan doesn't need a cursor, don't set this field to reduce network IO.
*