// NOTE: The lines below are artifacts of the web page this file was scraped
// from; they are kept as comments so the file remains valid Java.
// Please wait. This can take a few minutes ...
// Many resources are needed to download a project. Please understand that we have to compensate our server costs. Thank you in advance.
// Project price: only $1.
// You can buy this project and download/modify it as often as you want.
// org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos Maven / Gradle / Ivy
// Go to download
// Protobuf protocol classes used by HBase to communicate.
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: ZooKeeper.proto
package org.apache.hadoop.hbase.protobuf.generated;
@javax.annotation.Generated("proto") public final class ZooKeeperProtos {
private ZooKeeperProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface MetaRegionServerOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.ServerName server = 1;
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
boolean hasServer();
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
// optional uint32 rpc_version = 2;
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
boolean hasRpcVersion();
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
int getRpcVersion();
// optional .hbase.pb.RegionState.State state = 3;
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
boolean hasState();
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState();
}
/**
* Protobuf type {@code hbase.pb.MetaRegionServer}
*
*
**
* Content of the meta-region-server znode.
*
*/
@javax.annotation.Generated("proto") public static final class MetaRegionServer extends
com.google.protobuf.GeneratedMessage
implements MetaRegionServerOrBuilder {
// Use MetaRegionServer.newBuilder() to construct.
private MetaRegionServer(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private MetaRegionServer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final MetaRegionServer defaultInstance;
public static MetaRegionServer getDefaultInstance() {
return defaultInstance;
}
public MetaRegionServer getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MetaRegionServer(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = server_.toBuilder();
}
server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(server_);
server_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
rpcVersion_ = input.readUInt32();
break;
}
case 24: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(3, rawValue);
} else {
bitField0_ |= 0x00000004;
state_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public MetaRegionServer parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MetaRegionServer(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hbase.pb.ServerName server = 1;
public static final int SERVER_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_;
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
return server_;
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
return server_;
}
// optional uint32 rpc_version = 2;
public static final int RPC_VERSION_FIELD_NUMBER = 2;
private int rpcVersion_;
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
public int getRpcVersion() {
return rpcVersion_;
}
// optional .hbase.pb.RegionState.State state = 3;
public static final int STATE_FIELD_NUMBER = 3;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
public boolean hasState() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
return state_;
}
private void initFields() {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
rpcVersion_ = 0;
state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasServer()) {
memoizedIsInitialized = 0;
return false;
}
if (!getServer().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, server_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, rpcVersion_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeEnum(3, state_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, server_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, rpcVersion_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(3, state_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) obj;
boolean result = true;
result = result && (hasServer() == other.hasServer());
if (hasServer()) {
result = result && getServer()
.equals(other.getServer());
}
result = result && (hasRpcVersion() == other.hasRpcVersion());
if (hasRpcVersion()) {
result = result && (getRpcVersion()
== other.getRpcVersion());
}
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasServer()) {
hash = (37 * hash) + SERVER_FIELD_NUMBER;
hash = (53 * hash) + getServer().hashCode();
}
if (hasRpcVersion()) {
hash = (37 * hash) + RPC_VERSION_FIELD_NUMBER;
hash = (53 * hash) + getRpcVersion();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.MetaRegionServer}
*
*
**
* Content of the meta-region-server znode.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getServerFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
} else {
serverBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
rpcVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_MetaRegionServer_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (serverBuilder_ == null) {
result.server_ = server_;
} else {
result.server_ = serverBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.rpcVersion_ = rpcVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.state_ = state_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance()) return this;
if (other.hasServer()) {
mergeServer(other.getServer());
}
if (other.hasRpcVersion()) {
setRpcVersion(other.getRpcVersion());
}
if (other.hasState()) {
setState(other.getState());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasServer()) {
return false;
}
if (!getServer().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.ServerName server = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
if (serverBuilder_ == null) {
return server_;
} else {
return serverBuilder_.getMessage();
}
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
server_ = value;
onChanged();
} else {
serverBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public Builder setServer(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (serverBuilder_ == null) {
server_ = builderForValue.build();
onChanged();
} else {
serverBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
server_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial();
} else {
server_ = value;
}
onChanged();
} else {
serverBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public Builder clearServer() {
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
onChanged();
} else {
serverBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getServerFieldBuilder().getBuilder();
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
if (serverBuilder_ != null) {
return serverBuilder_.getMessageOrBuilder();
} else {
return server_;
}
}
/**
* required .hbase.pb.ServerName server = 1;
*
*
* The ServerName hosting the meta region currently, or destination server,
* if meta region is in transition.
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getServerFieldBuilder() {
if (serverBuilder_ == null) {
serverBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
server_,
getParentForChildren(),
isClean());
server_ = null;
}
return serverBuilder_;
}
// optional uint32 rpc_version = 2;
private int rpcVersion_ ;
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
public int getRpcVersion() {
return rpcVersion_;
}
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
public Builder setRpcVersion(int value) {
bitField0_ |= 0x00000002;
rpcVersion_ = value;
onChanged();
return this;
}
/**
* optional uint32 rpc_version = 2;
*
*
* The major version of the rpc the server speaks. This is used so that
* clients connecting to the cluster can have prior knowledge of what version
* to send to a RegionServer. AsyncHBase will use this to detect versions.
*
*/
public Builder clearRpcVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
rpcVersion_ = 0;
onChanged();
return this;
}
// optional .hbase.pb.RegionState.State state = 3;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
public boolean hasState() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
return state_;
}
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
state_ = value;
onChanged();
return this;
}
/**
* optional .hbase.pb.RegionState.State state = 3;
*
*
* State of the region transition. OPEN means fully operational 'hbase:meta'
*
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000004);
state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.MetaRegionServer)
}
static {
defaultInstance = new MetaRegionServer(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.MetaRegionServer)
}
public interface MasterOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.ServerName master = 1;
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
boolean hasMaster();
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster();
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();
// optional uint32 rpc_version = 2;
/**
* optional uint32 rpc_version = 2;
*
*
* Major RPC version so that clients can know what version the master can accept.
*
*/
boolean hasRpcVersion();
/**
* optional uint32 rpc_version = 2;
*
*
* Major RPC version so that clients can know what version the master can accept.
*
*/
int getRpcVersion();
// optional uint32 info_port = 3;
/**
* optional uint32 info_port = 3;
*/
boolean hasInfoPort();
/**
* optional uint32 info_port = 3;
*/
int getInfoPort();
}
/**
* Protobuf type {@code hbase.pb.Master}
*
*
**
* Content of the master znode.
*
*/
@javax.annotation.Generated("proto") public static final class Master extends
com.google.protobuf.GeneratedMessage
implements MasterOrBuilder {
// Use Master.newBuilder() to construct.
private Master(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Master(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Master defaultInstance;
public static Master getDefaultInstance() {
return defaultInstance;
}
public Master getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Master(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = master_.toBuilder();
}
master_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(master_);
master_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
rpcVersion_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
infoPort_ = input.readUInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
    /** @return the reflection descriptor for the {@code hbase.pb.Master} message type. */
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
    }
    /** Binds the generated field-accessor table to this class and its Builder. */
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
    }
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public Master parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Master(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
    // Presence bits: bit0 = master, bit1 = rpc_version, bit2 = info_port.
    private int bitField0_;
    // required .hbase.pb.ServerName master = 1;
    public static final int MASTER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_;
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    public boolean hasMaster() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
      return master_;
    }
    /**
     * <code>required .hbase.pb.ServerName master = 1;</code>
     *
     * <pre>
     * The ServerName of the current Master
     * </pre>
     */
    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
      return master_;
    }
    // optional uint32 rpc_version = 2;
    public static final int RPC_VERSION_FIELD_NUMBER = 2;
    private int rpcVersion_;
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * Major RPC version so that clients can know what version the master can accept.
     * </pre>
     */
    public boolean hasRpcVersion() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional uint32 rpc_version = 2;</code>
     *
     * <pre>
     * Major RPC version so that clients can know what version the master can accept.
     * </pre>
     */
    public int getRpcVersion() {
      return rpcVersion_;
    }
    // optional uint32 info_port = 3;
    public static final int INFO_PORT_FIELD_NUMBER = 3;
    private int infoPort_;
    /**
     * <code>optional uint32 info_port = 3;</code>
     */
    public boolean hasInfoPort() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional uint32 info_port = 3;</code>
     */
    public int getInfoPort() {
      return infoPort_;
    }
    /** Resets all fields to their proto defaults; called by every constructor. */
    private void initFields() {
      master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
      rpcVersion_ = 0;
      infoPort_ = 0;
    }
    // Cache for isInitialized(): -1 = not computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    /**
     * @return true when the required {@code master} field is present and is
     * itself fully initialized; the answer is memoized after the first call.
     */
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      if (!hasMaster()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getMaster().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    /**
     * Serializes every present field (per {@code bitField0_}) in field-number
     * order, followed by any preserved unknown fields.
     */
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // populate the memoized size before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, master_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, rpcVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, infoPort_);
      }
      getUnknownFields().writeTo(output);
    }
    // Cached wire size; -1 until first computed. Safe because the message is immutable.
    private int memoizedSerializedSize = -1;
    /**
     * @return the exact number of bytes {@link #writeTo} will emit, summing
     * each present field plus the unknown-field set; memoized.
     */
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, master_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, rpcVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, infoPort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    private static final long serialVersionUID = 0L;
    /** Java-serialization hook: delegates to the superclass proxy form. */
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    /**
     * Field-by-field equality: presence bits, present field values, and the
     * unknown-field sets must all match.
     */
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) obj;
      boolean result = true;
      result = result && (hasMaster() == other.hasMaster());
      if (hasMaster()) {
        result = result && getMaster()
            .equals(other.getMaster());
      }
      result = result && (hasRpcVersion() == other.hasRpcVersion());
      if (hasRpcVersion()) {
        result = result && (getRpcVersion()
            == other.getRpcVersion());
      }
      result = result && (hasInfoPort() == other.hasInfoPort());
      if (hasInfoPort()) {
        result = result && (getInfoPort()
            == other.getInfoPort());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    // Cached hash; 0 doubles as the "not yet computed" sentinel.
    private int memoizedHashCode = 0;
    /**
     * Hash mixing the descriptor, each present field (keyed by its field
     * number), and the unknown fields; consistent with {@link #equals}.
     */
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasMaster()) {
        hash = (37 * hash) + MASTER_FIELD_NUMBER;
        hash = (53 * hash) + getMaster().hashCode();
      }
      if (hasRpcVersion()) {
        hash = (37 * hash) + RPC_VERSION_FIELD_NUMBER;
        hash = (53 * hash) + getRpcVersion();
      }
      if (hasInfoPort()) {
        hash = (37 * hash) + INFO_PORT_FIELD_NUMBER;
        hash = (53 * hash) + getInfoPort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
    // Static parse helpers. All delegate to PARSER; the overloads accepting an
    // ExtensionRegistryLite resolve extensions during parsing, and the
    // *Delimited* variants read a varint length prefix before the message.
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    /** @return a fresh, empty {@link Builder}. */
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    /** @return a Builder pre-populated with {@code prototype}'s fields. */
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    /** @return a Builder initialized from this message. */
    public Builder toBuilder() { return newBuilder(this); }
    // Creates a builder parented to another builder (nested-builder support).
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
/**
* Protobuf type {@code hbase.pb.Master}
*
*
**
* Content of the master znode.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MasterOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getMasterFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (masterBuilder_ == null) {
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
} else {
masterBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
rpcVersion_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
infoPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_Master_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (masterBuilder_ == null) {
result.master_ = master_;
} else {
result.master_ = masterBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.rpcVersion_ = rpcVersion_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.infoPort_ = infoPort_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master.getDefaultInstance()) return this;
if (other.hasMaster()) {
mergeMaster(other.getMaster());
}
if (other.hasRpcVersion()) {
setRpcVersion(other.getRpcVersion());
}
if (other.hasInfoPort()) {
setInfoPort(other.getInfoPort());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasMaster()) {
return false;
}
if (!getMaster().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Master) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.ServerName master = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_;
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public boolean hasMaster() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
if (masterBuilder_ == null) {
return master_;
} else {
return masterBuilder_.getMessage();
}
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (masterBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
master_ = value;
onChanged();
} else {
masterBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public Builder setMaster(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (masterBuilder_ == null) {
master_ = builderForValue.build();
onChanged();
} else {
masterBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (masterBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
master_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
master_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial();
} else {
master_ = value;
}
onChanged();
} else {
masterBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public Builder clearMaster() {
if (masterBuilder_ == null) {
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
onChanged();
} else {
masterBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getMasterFieldBuilder().getBuilder();
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
if (masterBuilder_ != null) {
return masterBuilder_.getMessageOrBuilder();
} else {
return master_;
}
}
/**
* required .hbase.pb.ServerName master = 1;
*
*
* The ServerName of the current Master
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getMasterFieldBuilder() {
if (masterBuilder_ == null) {
masterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
master_,
getParentForChildren(),
isClean());
master_ = null;
}
return masterBuilder_;
}
// optional uint32 rpc_version = 2;
private int rpcVersion_ ;
/**
* optional uint32 rpc_version = 2;
*
*
* Major RPC version so that clients can know what version the master can accept.
*
*/
public boolean hasRpcVersion() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional uint32 rpc_version = 2;
*
*
* Major RPC version so that clients can know what version the master can accept.
*
*/
public int getRpcVersion() {
return rpcVersion_;
}
/**
* optional uint32 rpc_version = 2;
*
*
* Major RPC version so that clients can know what version the master can accept.
*
*/
public Builder setRpcVersion(int value) {
bitField0_ |= 0x00000002;
rpcVersion_ = value;
onChanged();
return this;
}
/**
* optional uint32 rpc_version = 2;
*
*
* Major RPC version so that clients can know what version the master can accept.
*
*/
public Builder clearRpcVersion() {
bitField0_ = (bitField0_ & ~0x00000002);
rpcVersion_ = 0;
onChanged();
return this;
}
// optional uint32 info_port = 3;
private int infoPort_ ;
/**
* optional uint32 info_port = 3;
*/
public boolean hasInfoPort() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional uint32 info_port = 3;
*/
public int getInfoPort() {
return infoPort_;
}
/**
* optional uint32 info_port = 3;
*/
public Builder setInfoPort(int value) {
bitField0_ |= 0x00000004;
infoPort_ = value;
onChanged();
return this;
}
/**
* optional uint32 info_port = 3;
*/
public Builder clearInfoPort() {
bitField0_ = (bitField0_ & ~0x00000004);
infoPort_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.Master)
}
static {
defaultInstance = new Master(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.Master)
}
  /** Read-side accessors shared by {@code ClusterUp} and its Builder. */
  public interface ClusterUpOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string start_date = 1;
    /**
     * <code>required string start_date = 1;</code>
     *
     * <pre>
     * If this znode is present, cluster is up. Currently
     * the data is cluster start_date.
     * </pre>
     */
    boolean hasStartDate();
    /**
     * <code>required string start_date = 1;</code>
     *
     * <pre>
     * If this znode is present, cluster is up. Currently
     * the data is cluster start_date.
     * </pre>
     */
    java.lang.String getStartDate();
    /**
     * <code>required string start_date = 1;</code>
     *
     * <pre>
     * If this znode is present, cluster is up. Currently
     * the data is cluster start_date.
     * </pre>
     */
    com.google.protobuf.ByteString
        getStartDateBytes();
  }
/**
* Protobuf type {@code hbase.pb.ClusterUp}
*
*
**
* Content of the '/hbase/running', cluster state, znode.
*
*/
@javax.annotation.Generated("proto") public static final class ClusterUp extends
com.google.protobuf.GeneratedMessage
implements ClusterUpOrBuilder {
// Use ClusterUp.newBuilder() to construct.
private ClusterUp(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ClusterUp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ClusterUp defaultInstance;
public static ClusterUp getDefaultInstance() {
return defaultInstance;
}
public ClusterUp getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
    /**
     * Parses a serialized {@code ClusterUp} message from {@code input}.
     * Invoked only by {@code PARSER}; on malformed data the partially parsed
     * message is attached to the thrown InvalidProtocolBufferException.
     */
    private ClusterUp(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();  // start from proto default values
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:  // zero tag marks end of the message/stream
              done = true;
              break;
            default: {
              // Unknown tag: keep the field so it round-trips.
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {  // field 1 (start_date), wire type 2: length-delimited
              bitField0_ |= 0x00000001;
              // Stored as raw bytes; decoded to String lazily by getStartDate().
              startDate_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Runs even while an exception propagates.
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public ClusterUp parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ClusterUp(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required string start_date = 1;
public static final int START_DATE_FIELD_NUMBER = 1;
private java.lang.Object startDate_;
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public boolean hasStartDate() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public java.lang.String getStartDate() {
java.lang.Object ref = startDate_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
startDate_ = s;
}
return s;
}
}
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public com.google.protobuf.ByteString
getStartDateBytes() {
java.lang.Object ref = startDate_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
startDate_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
startDate_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasStartDate()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getStartDateBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getStartDateBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) obj;
boolean result = true;
result = result && (hasStartDate() == other.hasStartDate());
if (hasStartDate()) {
result = result && getStartDate()
.equals(other.getStartDate());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasStartDate()) {
hash = (37 * hash) + START_DATE_FIELD_NUMBER;
hash = (53 * hash) + getStartDate().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.ClusterUp}
*
*
**
* Content of the '/hbase/running', cluster state, znode.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUpOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
startDate_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ClusterUp_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.startDate_ = startDate_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp)other);
} else {
super.mergeFrom(other);
return this;
}
}
/**
 * Merges another ClusterUp into this builder. Only fields present on
 * {@code other} overwrite current values; its unknown fields are
 * merged as well. Merging the default instance is a no-op.
 */
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.getDefaultInstance()) return this;
if (other.hasStartDate()) {
bitField0_ |= 0x00000001;
startDate_ = other.startDate_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
/**
 * A ClusterUp message is initialized only once the required
 * start_date field has been set.
 */
public final boolean isInitialized() {
if (!hasStartDate()) {
return false;
}
return true;
}
/**
 * Parses a ClusterUp from the stream and merges it into this builder.
 * On InvalidProtocolBufferException the partially parsed message (if
 * any) is still merged in the finally block before rethrowing, so
 * fields read before the failure are not lost.
 */
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string start_date = 1;
private java.lang.Object startDate_ = "";
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public boolean hasStartDate() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required string start_date = 1;
 *
 * Returns start_date as a String. The field is stored internally as
 * either a String or a UTF-8 ByteString; when it is still a
 * ByteString it is decoded here and the decoded String is cached back
 * into the field for subsequent calls.
 */
public java.lang.String getStartDate() {
java.lang.Object ref = startDate_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
startDate_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * required string start_date = 1;
 *
 * Returns start_date as a UTF-8 ByteString. Mirror of getStartDate():
 * if the field currently holds a String it is encoded here and the
 * encoded ByteString is cached back into the field.
 */
public com.google.protobuf.ByteString
getStartDateBytes() {
java.lang.Object ref = startDate_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
startDate_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public Builder setStartDate(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
startDate_ = value;
onChanged();
return this;
}
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public Builder clearStartDate() {
bitField0_ = (bitField0_ & ~0x00000001);
startDate_ = getDefaultInstance().getStartDate();
onChanged();
return this;
}
/**
* required string start_date = 1;
*
*
* If this znode is present, cluster is up. Currently
* the data is cluster start_date.
*
*/
public Builder setStartDateBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
startDate_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.ClusterUp)
}
static {
defaultInstance = new ClusterUp(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.ClusterUp)
}
/**
 * Accessors for {@code hbase.pb.SplitLogTask}: the split task's
 * current State (field 1) and the ServerName that originated the
 * split (field 2). Both fields are required.
 */
public interface SplitLogTaskOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.SplitLogTask.State state = 1;
/**
 * required .hbase.pb.SplitLogTask.State state = 1;
 */
boolean hasState();
/**
 * required .hbase.pb.SplitLogTask.State state = 1;
 */
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState();
// required .hbase.pb.ServerName server_name = 2;
/**
 * required .hbase.pb.ServerName server_name = 2;
 *
 * (Trailing comment carried over from the .proto:
 * optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];)
 */
boolean hasServerName();
/**
 * required .hbase.pb.ServerName server_name = 2;
 *
 * optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
 */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
/**
 * required .hbase.pb.ServerName server_name = 2;
 *
 * optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
 */
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
}
/**
* Protobuf type {@code hbase.pb.SplitLogTask}
*
*
**
* WAL SplitLog directory znodes have this for content. Used doing distributed
* WAL splitting. Holds current state and name of server that originated split.
*
*/
@javax.annotation.Generated("proto") public static final class SplitLogTask extends
com.google.protobuf.GeneratedMessage
implements SplitLogTaskOrBuilder {
// Use SplitLogTask.newBuilder() to construct.
private SplitLogTask(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SplitLogTask(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SplitLogTask defaultInstance;
public static SplitLogTask getDefaultInstance() {
return defaultInstance;
}
public SplitLogTask getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
/**
 * Wire-format parsing constructor. Reads tag/value pairs until
 * end-of-stream (tag 0), filling state_ (field 1) and serverName_
 * (field 2); everything else is preserved in the unknown-field set.
 */
private SplitLogTask(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
// NOTE: the default arm precedes later case labels; this is valid
// Java — switch arms match by value, not by textual order.
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
// Field 1, wire type 0 (varint): the State enum. Numeric values
// unknown to this schema version are kept as unknown fields
// rather than silently dropped.
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
state_ = value;
}
break;
}
case 18: {
// Field 2, wire type 2 (length-delimited): the ServerName message.
// A repeated occurrence of a singular message field is merged into
// the previously parsed value, per protobuf semantics.
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = serverName_.toBuilder();
}
serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(serverName_);
serverName_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Always freeze whatever was parsed — even on error — so the
// unfinished message attached to the exception is usable.
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public SplitLogTask parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SplitLogTask(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
/**
 * Protobuf enum {@code hbase.pb.SplitLogTask.State}
 *
 * Lifecycle of a distributed WAL-split task: UNASSIGNED until a
 * worker claims it (OWNED), then RESIGNED, DONE or ERR as the worker
 * gives the task up, completes it, or fails.
 */
public enum State
implements com.google.protobuf.ProtocolMessageEnum {
/**
 * UNASSIGNED = 0;
 */
UNASSIGNED(0, 0),
/**
 * OWNED = 1;
 */
OWNED(1, 1),
/**
 * RESIGNED = 2;
 */
RESIGNED(2, 2),
/**
 * DONE = 3;
 */
DONE(3, 3),
/**
 * ERR = 4;
 */
ERR(4, 4),
;
/**
 * UNASSIGNED = 0;
 */
public static final int UNASSIGNED_VALUE = 0;
/**
 * OWNED = 1;
 */
public static final int OWNED_VALUE = 1;
/**
 * RESIGNED = 2;
 */
public static final int RESIGNED_VALUE = 2;
/**
 * DONE = 3;
 */
public static final int DONE_VALUE = 3;
/**
 * ERR = 4;
 */
public static final int ERR_VALUE = 4;
public final int getNumber() { return value; }
// Maps a wire-format number to its enum constant, or null when the
// number is unknown to this version of the schema.
public static State valueOf(int value) {
switch (value) {
case 0: return UNASSIGNED;
case 1: return OWNED;
case 2: return RESIGNED;
case 3: return DONE;
case 4: return ERR;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public State findValueByNumber(int number) {
return State.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = values();
// Descriptor-based lookup used by protobuf's reflective APIs.
public static State valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
// index: position in the descriptor's value list; value: the number
// used on the wire. They happen to coincide for this enum.
private final int index;
private final int value;
private State(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.SplitLogTask.State)
}
private int bitField0_;
// required .hbase.pb.SplitLogTask.State state = 1;
public static final int STATE_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_;
/**
* required .hbase.pb.SplitLogTask.State state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.SplitLogTask.State state = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() {
return state_;
}
// required .hbase.pb.ServerName server_name = 2;
public static final int SERVER_NAME_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public boolean hasServerName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
return serverName_;
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
return serverName_;
}
private void initFields() {
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
}
// Memoized result of isInitialized(): -1 = not yet computed,
// 0 = false, 1 = true. Safe to cache because the message is immutable.
private byte memoizedIsInitialized = -1;
/**
 * True once both required fields (state, server_name) are set and the
 * nested server_name message is itself fully initialized.
 */
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasServerName()) {
memoizedIsInitialized = 0;
return false;
}
if (!getServerName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
/**
 * Serializes set fields in tag order (state = 1, server_name = 2)
 * followed by any unknown fields. getSerializedSize() is called first
 * so nested message sizes are memoized before writing begins.
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, serverName_);
}
getUnknownFields().writeTo(output);
}
// Cached wire size; -1 until first computed. Caching is safe because
// the message is immutable after construction.
private int memoizedSerializedSize = -1;
/**
 * Computes (and caches) the serialized byte size: the encoded size of
 * each set field plus the unknown-field set.
 */
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, serverName_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Structural equality over state, server_name and the unknown-field
// set: presence bits must match, and values must be equal where
// present (enum constants are compared by reference identity, which
// is valid for Java enums).
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) obj;
boolean result = true;
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasServerName() == other.hasServerName());
if (hasServerName()) {
result = result && getServerName()
.equals(other.getServerName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasServerName()) {
hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
hash = (53 * hash) + getServerName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.SplitLogTask}
*
*
**
* WAL SplitLog directory znodes have this for content. Used doing distributed
* WAL splitting. Holds current state and name of server that originated split.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTaskOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getServerNameFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
bitField0_ = (bitField0_ & ~0x00000001);
if (serverNameBuilder_ == null) {
serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
} else {
serverNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitLogTask_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
/**
 * Builds the message without required-field validation. Presence bits
 * are copied from the builder into the result; server_name is taken
 * from the nested field builder when one has been created, otherwise
 * from the plain field.
 */
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (serverNameBuilder_ == null) {
result.serverName_ = serverName_;
} else {
result.serverName_ = serverNameBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance()) return this;
if (other.hasState()) {
setState(other.getState());
}
if (other.hasServerName()) {
mergeServerName(other.getServerName());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasState()) {
return false;
}
if (!hasServerName()) {
return false;
}
if (!getServerName().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.SplitLogTask.State state = 1;
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
/**
* required .hbase.pb.SplitLogTask.State state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.SplitLogTask.State state = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() {
return state_;
}
/**
* required .hbase.pb.SplitLogTask.State state = 1;
*/
public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
state_ = value;
onChanged();
return this;
}
/**
* required .hbase.pb.SplitLogTask.State state = 1;
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
onChanged();
return this;
}
// required .hbase.pb.ServerName server_name = 2;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public boolean hasServerName() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
if (serverNameBuilder_ == null) {
return serverName_;
} else {
return serverNameBuilder_.getMessage();
}
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverNameBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
serverName_ = value;
onChanged();
} else {
serverNameBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public Builder setServerName(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (serverNameBuilder_ == null) {
serverName_ = builderForValue.build();
onChanged();
} else {
serverNameBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
 * required .hbase.pb.ServerName server_name = 2;
 *
 * Merges {@code value} into the current server_name: if the field is
 * already set to a non-default message the two are merged field-wise;
 * otherwise {@code value} simply replaces it. The presence bit is set
 * either way. Delegates to the nested field builder when one exists.
 */
public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverNameBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
serverName_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
} else {
serverName_ = value;
}
onChanged();
} else {
serverNameBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public Builder clearServerName() {
if (serverNameBuilder_ == null) {
serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
onChanged();
} else {
serverNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getServerNameFieldBuilder().getBuilder();
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
if (serverNameBuilder_ != null) {
return serverNameBuilder_.getMessageOrBuilder();
} else {
return serverName_;
}
}
/**
* required .hbase.pb.ServerName server_name = 2;
*
*
* optional RecoveryMode DEPRECATED_mode = 3 [default = UNKNOWN];
*
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getServerNameFieldBuilder() {
if (serverNameBuilder_ == null) {
serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
serverName_,
getParentForChildren(),
isClean());
serverName_ = null;
}
return serverNameBuilder_;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.SplitLogTask)
}
static {
defaultInstance = new SplitLogTask(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.SplitLogTask)
}
/**
 * Accessors for {@code hbase.pb.DeprecatedTableState}: a table's state
 * as formerly tracked in a ZooKeeper znode (table state now lives in
 * the table descriptor on HDFS).
 */
public interface DeprecatedTableStateOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 */
boolean hasState();
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 */
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState();
}
/**
* Protobuf type {@code hbase.pb.DeprecatedTableState}
*
*
**
* The znode that holds state of table.
* Deprected, table state is stored in table descriptor on HDFS.
*
*/
@javax.annotation.Generated("proto") public static final class DeprecatedTableState extends
com.google.protobuf.GeneratedMessage
implements DeprecatedTableStateOrBuilder {
// Use DeprecatedTableState.newBuilder() to construct.
private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final DeprecatedTableState defaultInstance;
public static DeprecatedTableState getDefaultInstance() {
return defaultInstance;
}
public DeprecatedTableState getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private DeprecatedTableState(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
state_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor for hbase.pb.DeprecatedTableState, resolved from the outer class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
}
// NOTE(review): the type arguments on Parser/AbstractParser were stripped in
// this copy; restored to Parser<DeprecatedTableState> as protoc generates.
public static com.google.protobuf.Parser<DeprecatedTableState> PARSER =
new com.google.protobuf.AbstractParser<DeprecatedTableState>() {
public DeprecatedTableState parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new DeprecatedTableState(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<DeprecatedTableState> getParserForType() {
return PARSER;
}
/**
 * Protobuf enum {@code hbase.pb.DeprecatedTableState.State}
 *
 * <pre>
 * Table's current state
 * </pre>
 */
public enum State
implements com.google.protobuf.ProtocolMessageEnum {
/**
 * <code>ENABLED = 0;</code>
 */
ENABLED(0, 0),
/**
 * <code>DISABLED = 1;</code>
 */
DISABLED(1, 1),
/**
 * <code>DISABLING = 2;</code>
 */
DISABLING(2, 2),
/**
 * <code>ENABLING = 3;</code>
 */
ENABLING(3, 3),
;
/**
 * <code>ENABLED = 0;</code>
 */
public static final int ENABLED_VALUE = 0;
/**
 * <code>DISABLED = 1;</code>
 */
public static final int DISABLED_VALUE = 1;
/**
 * <code>DISABLING = 2;</code>
 */
public static final int DISABLING_VALUE = 2;
/**
 * <code>ENABLING = 3;</code>
 */
public static final int ENABLING_VALUE = 3;
public final int getNumber() { return value; }
// Maps a wire-format number to the enum constant; null for unknown numbers.
public static State valueOf(int value) {
switch (value) {
case 0: return ENABLED;
case 1: return DISABLED;
case 2: return DISABLING;
case 3: return ENABLING;
default: return null;
}
}
// NOTE(review): EnumLiteMap's type argument was stripped in this copy;
// restored to EnumLiteMap<State> as protoc generates.
public static com.google.protobuf.Internal.EnumLiteMap<State>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<State>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<State>() {
public State findValueByNumber(int number) {
return State.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = values();
// Descriptor-based lookup, used by reflective protobuf APIs.
public static State valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
// index: position in the descriptor; value: wire-format number.
private final int index;
private final int value;
private State(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.DeprecatedTableState.State)
}
// Presence bits for optional/required fields; bit 0 tracks state_.
private int bitField0_;
// required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
public static final int STATE_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 *
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 *
 */
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 *
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 *
 */
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
return state_;
}
// Resets fields to their proto-declared defaults (state defaults to ENABLED).
private void initFields() {
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
}
// Memoized result: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
// state is a required field, so it must be set for the message to be valid.
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
// Serializes the set fields (and any unknown fields) in wire format.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, state_.getNumber());
}
getUnknownFields().writeTo(output);
}
// Memoized serialized size; -1 means not yet computed.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, state_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Structural equality: same set fields, same values, same unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj;
boolean result = true;
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized hash; 0 means not yet computed (recomputed if hash happens to be 0).
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points; all delegate to PARSER. The *parseDelimitedFrom*
// variants read a length-prefixed message from the stream.
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: fresh builder, builder seeded from a prototype, and the
// parent-linked variant used internally for nested-builder change propagation.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code hbase.pb.DeprecatedTableState}
 *
 * <pre>
 **
 * The znode that holds state of table.
 * Deprecated, table state is stored in table descriptor on HDFS.
 * </pre>
 */
// NOTE(review): the type argument on GeneratedMessage.Builder was stripped in
// this copy; restored to Builder<Builder> (self-referential, as protoc emits).
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// No nested-message field builders to pre-create for this message.
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
// Resets the builder to proto defaults and clears the presence bit.
public Builder clear() {
super.clear();
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_DeprecatedTableState_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance();
}
// build() enforces required fields; buildPartial() does not.
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.state_ = state_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this;
if (other.hasState()) {
setState(other.getState());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
// state is required.
if (!hasState()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Merge whatever was parsed before the failure, then rethrow.
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 * <pre>
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 * </pre>
 */
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 * <pre>
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 * </pre>
 */
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() {
return state_;
}
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 * <pre>
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 * </pre>
 */
public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
state_ = value;
onChanged();
return this;
}
/**
 * required .hbase.pb.DeprecatedTableState.State state = 1 [default = ENABLED];
 *
 * <pre>
 * This is the table's state. If no znode for a table,
 * its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class
 * for more.
 * </pre>
 */
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.DeprecatedTableState)
}
// Eagerly create and initialize the shared default instance.
static {
defaultInstance = new DeprecatedTableState(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
}
// Read-only view of a TableCF message (implemented by both the message and
// its Builder).
public interface TableCFOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .hbase.pb.TableName table_name = 1;
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
boolean hasTableName();
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();
// repeated bytes families = 2;
/**
 * repeated bytes families = 2;
 */
// NOTE(review): the List type argument was stripped in this copy; restored to
// List<ByteString>, matching getFamilies(int) below.
java.util.List<com.google.protobuf.ByteString> getFamiliesList();
/**
 * repeated bytes families = 2;
 */
int getFamiliesCount();
/**
 * repeated bytes families = 2;
 */
com.google.protobuf.ByteString getFamilies(int index);
}
/**
* Protobuf type {@code hbase.pb.TableCF}
*/
@javax.annotation.Generated("proto") public static final class TableCF extends
com.google.protobuf.GeneratedMessage
implements TableCFOrBuilder {
// Use TableCF.newBuilder() to construct.
// NOTE(review): the type argument on GeneratedMessage.Builder was stripped in
// this copy ("Builder>"); restored to "Builder<?>" as protoc emits.
private TableCF(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Lightweight constructor for the shared default instance (no parsing).
private TableCF(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TableCF defaultInstance;
public static TableCF getDefaultInstance() {
return defaultInstance;
}
public TableCF getDefaultInstanceForType() {
return defaultInstance;
}
// Fields present on the wire but unknown to this schema version.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until end of stream,
// preserving unknown fields and collecting repeated `families` entries.
private TableCF(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
// Field 1 (table_name): merge into any previously-parsed value, per
// proto2 semantics for duplicated message fields.
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = tableName_.toBuilder();
}
tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(tableName_);
tableName_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
// Field 2 (families, repeated bytes): allocate the list lazily on the
// first occurrence.
// NOTE(review): the ArrayList type argument was stripped in this copy;
// restored to ArrayList<ByteString> as protoc emits.
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
families_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
mutable_bitField0_ |= 0x00000002;
}
families_.add(input.readBytes());
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Freeze the repeated field and record parsed state even on failure.
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
families_ = java.util.Collections.unmodifiableList(families_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
// Descriptor for hbase.pb.TableCF, resolved from the outer class.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableCF_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableCF_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder.class);
}
// NOTE(review): type arguments on Parser/AbstractParser were stripped in this
// copy; restored to Parser<TableCF> as protoc generates.
public static com.google.protobuf.Parser<TableCF> PARSER =
new com.google.protobuf.AbstractParser<TableCF>() {
public TableCF parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TableCF(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TableCF> getParserForType() {
return PARSER;
}
// Presence bits for optional fields; bit 0 tracks tableName_.
private int bitField0_;
// optional .hbase.pb.TableName table_name = 1;
public static final int TABLE_NAME_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public boolean hasTableName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
return tableName_;
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
return tableName_;
}
// repeated bytes families = 2;
public static final int FAMILIES_FIELD_NUMBER = 2;
// NOTE(review): the List type arguments below were stripped in this copy;
// restored to List<ByteString>, consistent with getFamilies(int).
private java.util.List<com.google.protobuf.ByteString> families_;
/**
 * repeated bytes families = 2;
 */
public java.util.List<com.google.protobuf.ByteString>
getFamiliesList() {
return families_;
}
/**
 * repeated bytes families = 2;
 */
public int getFamiliesCount() {
return families_.size();
}
/**
 * repeated bytes families = 2;
 */
public com.google.protobuf.ByteString getFamilies(int index) {
return families_.get(index);
}
// Resets fields to proto defaults: no table name, empty families list.
private void initFields() {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
families_ = java.util.Collections.emptyList();
}
// Memoized result: -1 unknown, 0 not initialized, 1 initialized.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
// table_name is optional, but if present it must itself be initialized.
if (hasTableName()) {
if (!getTableName().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
// Serializes the set fields (and any unknown fields) in wire format.
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, tableName_);
}
for (int i = 0; i < families_.size(); i++) {
output.writeBytes(2, families_.get(i));
}
getUnknownFields().writeTo(output);
}
// Memoized serialized size; -1 means not yet computed.
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, tableName_);
}
{
int dataSize = 0;
for (int i = 0; i < families_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(families_.get(i));
}
size += dataSize;
// One byte of tag overhead per families entry (field 2, wire type 2).
size += 1 * getFamiliesList().size();
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
// Structural equality: same set fields, same values, same unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF) obj;
boolean result = true;
result = result && (hasTableName() == other.hasTableName());
if (hasTableName()) {
result = result && getTableName()
.equals(other.getTableName());
}
result = result && getFamiliesList()
.equals(other.getFamiliesList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
// Memoized hash; 0 means not yet computed.
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasTableName()) {
hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
hash = (53 * hash) + getTableName().hashCode();
}
if (getFamiliesCount() > 0) {
hash = (37 * hash) + FAMILIES_FIELD_NUMBER;
hash = (53 * hash) + getFamiliesList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
// Static parse entry points; all delegate to PARSER. The *parseDelimitedFrom*
// variants read a length-prefixed message from the stream.
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
// Builder factories: fresh builder, builder seeded from a prototype, and the
// parent-linked variant used internally for nested-builder change propagation.
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.TableCF}
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder {
// Descriptor plumbing for the TableCF builder.
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableCF_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableCF_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// Pre-create the nested table_name field builder when reflection requires it.
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getTableNameFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
// Resets the builder to proto defaults and clears both presence bits.
public Builder clear() {
super.clear();
if (tableNameBuilder_ == null) {
tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
} else {
tableNameBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
families_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableCF_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.getDefaultInstance();
}
// build() enforces initialization; buildPartial() does not.
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (tableNameBuilder_ == null) {
result.tableName_ = tableName_;
} else {
result.tableName_ = tableNameBuilder_.build();
}
// Freeze the repeated field; the builder will lazily re-copy on next mutation.
if (((bitField0_ & 0x00000002) == 0x00000002)) {
families_ = java.util.Collections.unmodifiableList(families_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.families_ = families_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic merge: dispatches to the typed overload when possible.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: set fields from `other` win; families lists are concatenated.
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.getDefaultInstance()) return this;
if (other.hasTableName()) {
mergeTableName(other.getTableName());
}
if (!other.families_.isEmpty()) {
if (families_.isEmpty()) {
// Share the other message's immutable list until this builder mutates it.
families_ = other.families_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureFamiliesIsMutable();
families_.addAll(other.families_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
// table_name is optional, but if present it must itself be initialized.
if (hasTableName()) {
if (!getTableName().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Merge whatever was parsed before the failure, then rethrow.
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Tracks which optional fields have been explicitly set (bit 0x1 =
// table_name, bit 0x2 = builder owns a mutable families list).
private int bitField0_;

// optional .hbase.pb.TableName table_name = 1;
// Either tableName_ holds the value directly, or (once a nested builder has
// been requested) tableNameBuilder_ owns it and tableName_ is null.
private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_;
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public boolean hasTableName() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
  if (tableNameBuilder_ == null) {
    return tableName_;
  } else {
    return tableNameBuilder_.getMessage();
  }
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
  if (tableNameBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    tableName_ = value;
    onChanged();
  } else {
    tableNameBuilder_.setMessage(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public Builder setTableName(
    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
  if (tableNameBuilder_ == null) {
    tableName_ = builderForValue.build();
    onChanged();
  } else {
    tableNameBuilder_.setMessage(builderForValue.build());
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 *
 * Field-wise merge: if a value is already set (and is not the default
 * instance), the incoming value is merged into it; otherwise it replaces it.
 */
public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
  if (tableNameBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
      tableName_ =
        org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
    } else {
      tableName_ = value;
    }
    onChanged();
  } else {
    tableNameBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public Builder clearTableName() {
  if (tableNameBuilder_ == null) {
    tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
    onChanged();
  } else {
    tableNameBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000001);
  return this;
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 *
 * Returns a mutable nested builder; marks the field present immediately.
 */
public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() {
  bitField0_ |= 0x00000001;
  onChanged();
  return getTableNameFieldBuilder().getBuilder();
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 */
public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
  if (tableNameBuilder_ != null) {
    return tableNameBuilder_.getMessageOrBuilder();
  } else {
    return tableName_;
  }
}
/**
 * optional .hbase.pb.TableName table_name = 1;
 *
 * Lazily creates the SingleFieldBuilder; ownership of the current value
 * transfers into it and tableName_ is nulled out.
 */
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
    getTableNameFieldBuilder() {
  if (tableNameBuilder_ == null) {
    tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
            tableName_,
            getParentForChildren(),
            isClean());
    tableName_ = null;
  }
  return tableNameBuilder_;
}
// repeated bytes families = 2;
private java.util.List families_ = java.util.Collections.emptyList();
private void ensureFamiliesIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
families_ = new java.util.ArrayList(families_);
bitField0_ |= 0x00000002;
}
}
/**
* repeated bytes families = 2;
*/
public java.util.List
getFamiliesList() {
return java.util.Collections.unmodifiableList(families_);
}
/**
* repeated bytes families = 2;
*/
public int getFamiliesCount() {
return families_.size();
}
/**
* repeated bytes families = 2;
*/
public com.google.protobuf.ByteString getFamilies(int index) {
return families_.get(index);
}
/**
* repeated bytes families = 2;
*/
public Builder setFamilies(
int index, com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureFamiliesIsMutable();
families_.set(index, value);
onChanged();
return this;
}
/**
* repeated bytes families = 2;
*/
public Builder addFamilies(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureFamiliesIsMutable();
families_.add(value);
onChanged();
return this;
}
/**
* repeated bytes families = 2;
*/
public Builder addAllFamilies(
java.lang.Iterable extends com.google.protobuf.ByteString> values) {
ensureFamiliesIsMutable();
super.addAll(values, families_);
onChanged();
return this;
}
/**
* repeated bytes families = 2;
*/
public Builder clearFamilies() {
families_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.TableCF)
}
static {
  // Eagerly build the shared default (all-fields-unset) instance returned
  // by getDefaultInstance(); initFields() resets every field to its default.
  defaultInstance = new TableCF(true);
  defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.TableCF)
}
public interface ReplicationPeerOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string clusterkey = 1;
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
boolean hasClusterkey();
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
java.lang.String getClusterkey();
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
com.google.protobuf.ByteString
getClusterkeyBytes();
// optional string replicationEndpointImpl = 2;
/**
* optional string replicationEndpointImpl = 2;
*/
boolean hasReplicationEndpointImpl();
/**
* optional string replicationEndpointImpl = 2;
*/
java.lang.String getReplicationEndpointImpl();
/**
* optional string replicationEndpointImpl = 2;
*/
com.google.protobuf.ByteString
getReplicationEndpointImplBytes();
// repeated .hbase.pb.BytesBytesPair data = 3;
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
java.util.List
getDataList();
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getData(int index);
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
int getDataCount();
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>
getDataOrBuilderList();
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getDataOrBuilder(
int index);
// repeated .hbase.pb.NameStringPair configuration = 4;
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
java.util.List
getConfigurationList();
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index);
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
int getConfigurationCount();
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>
getConfigurationOrBuilderList();
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder(
int index);
// repeated .hbase.pb.TableCF table_cfs = 5;
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
java.util.List
getTableCfsList();
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF getTableCfs(int index);
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
int getTableCfsCount();
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
java.util.List extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder>
getTableCfsOrBuilderList();
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder getTableCfsOrBuilder(
int index);
// repeated bytes namespaces = 6;
/**
* repeated bytes namespaces = 6;
*/
java.util.List getNamespacesList();
/**
* repeated bytes namespaces = 6;
*/
int getNamespacesCount();
/**
* repeated bytes namespaces = 6;
*/
com.google.protobuf.ByteString getNamespaces(int index);
// optional int64 bandwidth = 7;
/**
* optional int64 bandwidth = 7;
*/
boolean hasBandwidth();
/**
* optional int64 bandwidth = 7;
*/
long getBandwidth();
}
/**
* Protobuf type {@code hbase.pb.ReplicationPeer}
*
*
**
* Used by replication. Holds a replication peer key.
*
*/
@javax.annotation.Generated("proto") public static final class ReplicationPeer extends
com.google.protobuf.GeneratedMessage
implements ReplicationPeerOrBuilder {
// Use ReplicationPeer.newBuilder() to construct.
// NOTE: restored the wildcard type argument (Builder<?>) stripped in
// transcription; the raw "Builder>" form was not valid Java.
private ReplicationPeer(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
// Constructor used only for the shared default instance; field defaults are
// assigned afterwards via initFields() in the static initializer.
private ReplicationPeer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

private static final ReplicationPeer defaultInstance;
/** Returns the shared immutable default (all-fields-unset) instance. */
public static ReplicationPeer getDefaultInstance() {
  return defaultInstance;
}
public ReplicationPeer getDefaultInstanceForType() {
  return defaultInstance;
}

// Fields that arrived on the wire but are not part of this schema.
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
    getUnknownFields() {
  return this.unknownFields;
}
/**
 * Wire-format parsing constructor. Reads tag/value pairs until EOF (tag 0)
 * or an unparseable unknown field. Repeated fields accumulate into mutable
 * ArrayLists (tracked via mutable_bitField0_) and are frozen in the finally
 * block so they are immutable even when parsing throws.
 * NOTE: restored the generic type arguments on the ArrayList allocations,
 * which were stripped in transcription.
 */
private ReplicationPeer(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      // protoc deliberately places the default branch before the field
      // cases; a Java switch permits this and behavior is unchanged.
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          bitField0_ |= 0x00000001;
          clusterkey_ = input.readBytes();
          break;
        }
        case 18: {
          bitField0_ |= 0x00000002;
          replicationEndpointImpl_ = input.readBytes();
          break;
        }
        case 26: {
          if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
            data_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair>();
            mutable_bitField0_ |= 0x00000004;
          }
          data_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.PARSER, extensionRegistry));
          break;
        }
        case 34: {
          if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
            configuration_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>();
            mutable_bitField0_ |= 0x00000008;
          }
          configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry));
          break;
        }
        case 42: {
          if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
            tableCfs_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF>();
            mutable_bitField0_ |= 0x00000010;
          }
          tableCfs_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.PARSER, extensionRegistry));
          break;
        }
        case 50: {
          if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
            namespaces_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
            mutable_bitField0_ |= 0x00000020;
          }
          namespaces_.add(input.readBytes());
          break;
        }
        case 56: {
          bitField0_ |= 0x00000004;
          bandwidth_ = input.readInt64();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    // Freeze any repeated-field lists that were populated, then attach the
    // unknown fields -- also on the exception path, so the unfinished
    // message handed to callers is internally consistent.
    if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
      data_ = java.util.Collections.unmodifiableList(data_);
    }
    if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
      configuration_ = java.util.Collections.unmodifiableList(configuration_);
    }
    if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
      tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_);
    }
    if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
      namespaces_ = java.util.Collections.unmodifiableList(namespaces_);
    }
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
/** Returns the descriptor for the hbase.pb.ReplicationPeer message type. */
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_descriptor;
}
// Maps descriptor fields to the generated accessors for reflection support.
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder.class);
}
// NOTE: restored the generic type arguments (Parser<ReplicationPeer>,
// AbstractParser<ReplicationPeer>) stripped in transcription; the raw types
// would not satisfy the getParserForType() override contract.
public static com.google.protobuf.Parser<ReplicationPeer> PARSER =
    new com.google.protobuf.AbstractParser<ReplicationPeer>() {
      public ReplicationPeer parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new ReplicationPeer(input, extensionRegistry);
      }
    };
@java.lang.Override
public com.google.protobuf.Parser<ReplicationPeer> getParserForType() {
  return PARSER;
}
// Presence bits: 0x1 = clusterkey, 0x2 = replicationEndpointImpl,
// 0x4 = bandwidth.
private int bitField0_;

// required string clusterkey = 1;
public static final int CLUSTERKEY_FIELD_NUMBER = 1;
// Holds either a String or a ByteString; converted lazily and cached
// (standard protobuf string-field representation).
private java.lang.Object clusterkey_;
/**
 * required string clusterkey = 1;
 *
 * clusterkey is the concatenation of the slave cluster's
 * hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
 */
public boolean hasClusterkey() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * required string clusterkey = 1;
 *
 * Decodes the cached ByteString to a String; the String is cached back
 * only when it is valid UTF-8.
 */
public java.lang.String getClusterkey() {
  java.lang.Object ref = clusterkey_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      clusterkey_ = s;
    }
    return s;
  }
}
/**
 * required string clusterkey = 1;
 *
 * Encodes the cached String to UTF-8 bytes and caches the ByteString.
 */
public com.google.protobuf.ByteString
    getClusterkeyBytes() {
  java.lang.Object ref = clusterkey_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    clusterkey_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}

// optional string replicationEndpointImpl = 2;
public static final int REPLICATIONENDPOINTIMPL_FIELD_NUMBER = 2;
// Same lazy String/ByteString caching scheme as clusterkey_.
private java.lang.Object replicationEndpointImpl_;
/**
 * optional string replicationEndpointImpl = 2;
 */
public boolean hasReplicationEndpointImpl() {
  return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * optional string replicationEndpointImpl = 2;
 */
public java.lang.String getReplicationEndpointImpl() {
  java.lang.Object ref = replicationEndpointImpl_;
  if (ref instanceof java.lang.String) {
    return (java.lang.String) ref;
  } else {
    com.google.protobuf.ByteString bs =
        (com.google.protobuf.ByteString) ref;
    java.lang.String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      replicationEndpointImpl_ = s;
    }
    return s;
  }
}
/**
 * optional string replicationEndpointImpl = 2;
 */
public com.google.protobuf.ByteString
    getReplicationEndpointImplBytes() {
  java.lang.Object ref = replicationEndpointImpl_;
  if (ref instanceof java.lang.String) {
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    replicationEndpointImpl_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
// repeated .hbase.pb.BytesBytesPair data = 3;
public static final int DATA_FIELD_NUMBER = 3;
private java.util.List data_;
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public java.util.List getDataList() {
return data_;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>
getDataOrBuilderList() {
return data_;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public int getDataCount() {
return data_.size();
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getData(int index) {
return data_.get(index);
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getDataOrBuilder(
int index) {
return data_.get(index);
}
// repeated .hbase.pb.NameStringPair configuration = 4;
public static final int CONFIGURATION_FIELD_NUMBER = 4;
private java.util.List configuration_;
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public java.util.List getConfigurationList() {
return configuration_;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>
getConfigurationOrBuilderList() {
return configuration_;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public int getConfigurationCount() {
return configuration_.size();
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) {
return configuration_.get(index);
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder(
int index) {
return configuration_.get(index);
}
// repeated .hbase.pb.TableCF table_cfs = 5;
public static final int TABLE_CFS_FIELD_NUMBER = 5;
private java.util.List tableCfs_;
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public java.util.List getTableCfsList() {
return tableCfs_;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder>
getTableCfsOrBuilderList() {
return tableCfs_;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public int getTableCfsCount() {
return tableCfs_.size();
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF getTableCfs(int index) {
return tableCfs_.get(index);
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder getTableCfsOrBuilder(
int index) {
return tableCfs_.get(index);
}
// repeated bytes namespaces = 6;
// NOTE: restored the generic type argument (List<ByteString>) stripped in
// transcription.
public static final int NAMESPACES_FIELD_NUMBER = 6;
private java.util.List<com.google.protobuf.ByteString> namespaces_;
/**
 * repeated bytes namespaces = 6;
 */
public java.util.List<com.google.protobuf.ByteString>
    getNamespacesList() {
  return namespaces_;
}
/**
 * repeated bytes namespaces = 6;
 */
public int getNamespacesCount() {
  return namespaces_.size();
}
/**
 * repeated bytes namespaces = 6;
 */
public com.google.protobuf.ByteString getNamespaces(int index) {
  return namespaces_.get(index);
}
// optional int64 bandwidth = 7;
public static final int BANDWIDTH_FIELD_NUMBER = 7;
private long bandwidth_;
/**
 * optional int64 bandwidth = 7;
 */
public boolean hasBandwidth() {
  return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * optional int64 bandwidth = 7;
 */
public long getBandwidth() {
  return bandwidth_;
}
// Resets every field to its proto default; called from constructors before
// parsing and when building the shared default instance.
private void initFields() {
  clusterkey_ = "";
  replicationEndpointImpl_ = "";
  data_ = java.util.Collections.emptyList();
  configuration_ = java.util.Collections.emptyList();
  tableCfs_ = java.util.Collections.emptyList();
  namespaces_ = java.util.Collections.emptyList();
  bandwidth_ = 0L;
}
// Memoized initialization check: -1 = not computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * True when the required clusterkey is set and every repeated sub-message
 * (data, configuration, table_cfs) is itself initialized.
 */
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1;
  if (!hasClusterkey()) {
    memoizedIsInitialized = 0;
    return false;
  }
  for (int i = 0; i < getDataCount(); i++) {
    if (!getData(i).isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  for (int i = 0; i < getConfigurationCount(); i++) {
    if (!getConfiguration(i).isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  for (int i = 0; i < getTableCfsCount(); i++) {
    if (!getTableCfs(i).isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  memoizedIsInitialized = 1;
  return true;
}
/**
 * Serializes set fields in field-number order, then the unknown fields.
 * getSerializedSize() is called first for its memoization side effect.
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeBytes(1, getClusterkeyBytes());
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeBytes(2, getReplicationEndpointImplBytes());
  }
  for (int i = 0; i < data_.size(); i++) {
    output.writeMessage(3, data_.get(i));
  }
  for (int i = 0; i < configuration_.size(); i++) {
    output.writeMessage(4, configuration_.get(i));
  }
  for (int i = 0; i < tableCfs_.size(); i++) {
    output.writeMessage(5, tableCfs_.get(i));
  }
  for (int i = 0; i < namespaces_.size(); i++) {
    output.writeBytes(6, namespaces_.get(i));
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeInt64(7, bandwidth_);
  }
  getUnknownFields().writeTo(output);
}
// Memoized wire size: -1 = not yet computed.
private int memoizedSerializedSize = -1;
/** Computes (and caches) the serialized size of all set fields. */
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;
  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBytesSize(1, getClusterkeyBytes());
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBytesSize(2, getReplicationEndpointImplBytes());
  }
  for (int i = 0; i < data_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream
      .computeMessageSize(3, data_.get(i));
  }
  for (int i = 0; i < configuration_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream
      .computeMessageSize(4, configuration_.get(i));
  }
  for (int i = 0; i < tableCfs_.size(); i++) {
    size += com.google.protobuf.CodedOutputStream
      .computeMessageSize(5, tableCfs_.get(i));
  }
  {
    // Payload bytes plus one 1-byte tag per namespaces element
    // (field number 6 fits in a single tag byte).
    int dataSize = 0;
    for (int i = 0; i < namespaces_.size(); i++) {
      dataSize += com.google.protobuf.CodedOutputStream
        .computeBytesSizeNoTag(namespaces_.get(i));
    }
    size += dataSize;
    size += 1 * getNamespacesList().size();
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
      .computeInt64Size(7, bandwidth_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
private static final long serialVersionUID = 0L;
// Java serialization delegates to the protobuf-aware replacement object.
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}
/**
 * Field-by-field equality: each optional field compares presence first,
 * then value; repeated fields compare as lists; unknown fields included.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
   return true;
  }
  if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) obj;

  boolean result = true;
  result = result && (hasClusterkey() == other.hasClusterkey());
  if (hasClusterkey()) {
    result = result && getClusterkey()
        .equals(other.getClusterkey());
  }
  result = result && (hasReplicationEndpointImpl() == other.hasReplicationEndpointImpl());
  if (hasReplicationEndpointImpl()) {
    result = result && getReplicationEndpointImpl()
        .equals(other.getReplicationEndpointImpl());
  }
  result = result && getDataList()
      .equals(other.getDataList());
  result = result && getConfigurationList()
      .equals(other.getConfigurationList());
  result = result && getTableCfsList()
      .equals(other.getTableCfsList());
  result = result && getNamespacesList()
      .equals(other.getNamespacesList());
  result = result && (hasBandwidth() == other.hasBandwidth());
  if (hasBandwidth()) {
    result = result && (getBandwidth()
        == other.getBandwidth());
  }
  result = result &&
      getUnknownFields().equals(other.getUnknownFields());
  return result;
}
// Memoized hash; 0 means "not yet computed" (a computed 0 would be
// recomputed each call, which is harmless).
private int memoizedHashCode = 0;
/**
 * Hash over the descriptor, each set field (tagged with its field number),
 * and the unknown fields; consistent with equals().
 */
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasClusterkey()) {
    hash = (37 * hash) + CLUSTERKEY_FIELD_NUMBER;
    hash = (53 * hash) + getClusterkey().hashCode();
  }
  if (hasReplicationEndpointImpl()) {
    hash = (37 * hash) + REPLICATIONENDPOINTIMPL_FIELD_NUMBER;
    hash = (53 * hash) + getReplicationEndpointImpl().hashCode();
  }
  if (getDataCount() > 0) {
    hash = (37 * hash) + DATA_FIELD_NUMBER;
    hash = (53 * hash) + getDataList().hashCode();
  }
  if (getConfigurationCount() > 0) {
    hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER;
    hash = (53 * hash) + getConfigurationList().hashCode();
  }
  if (getTableCfsCount() > 0) {
    hash = (37 * hash) + TABLE_CFS_FIELD_NUMBER;
    hash = (53 * hash) + getTableCfsList().hashCode();
  }
  if (getNamespacesCount() > 0) {
    hash = (37 * hash) + NAMESPACES_FIELD_NUMBER;
    hash = (53 * hash) + getNamespacesList().hashCode();
  }
  if (hasBandwidth()) {
    hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getBandwidth());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
// Static parse entry points; all delegate to PARSER. The parseDelimitedFrom
// variants read a length prefix first (for streams of messages).
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
/** Creates an empty builder for ReplicationPeer. */
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
/** Creates a builder pre-populated with a copy of {@code prototype}. */
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }

// Framework hook: builder attached to a parent for nested-builder updates.
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
* Protobuf type {@code hbase.pb.ReplicationPeer}
*
*
**
* Used by replication. Holds a replication peer key.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
    getDescriptor() {
  return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
    internalGetFieldAccessorTable() {
  return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable
      .ensureFieldAccessorsInitialized(
          org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder.class);
}

// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder()
private Builder() {
  maybeForceBuilderInitialization();
}
private Builder(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  super(parent);
  maybeForceBuilderInitialization();
}
// Eagerly creates nested repeated-field builders when the runtime requires
// field builders to always exist (reflection-based builder parents).
private void maybeForceBuilderInitialization() {
  if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    getDataFieldBuilder();
    getConfigurationFieldBuilder();
    getTableCfsFieldBuilder();
  }
}
private static Builder create() {
  return new Builder();
}
/**
 * Resets every field to its default and clears all presence bits. Repeated
 * message fields are cleared via their nested builders when those exist.
 */
public Builder clear() {
  super.clear();
  clusterkey_ = "";
  bitField0_ = (bitField0_ & ~0x00000001);
  replicationEndpointImpl_ = "";
  bitField0_ = (bitField0_ & ~0x00000002);
  if (dataBuilder_ == null) {
    data_ = java.util.Collections.emptyList();
    bitField0_ = (bitField0_ & ~0x00000004);
  } else {
    dataBuilder_.clear();
  }
  if (configurationBuilder_ == null) {
    configuration_ = java.util.Collections.emptyList();
    bitField0_ = (bitField0_ & ~0x00000008);
  } else {
    configurationBuilder_.clear();
  }
  if (tableCfsBuilder_ == null) {
    tableCfs_ = java.util.Collections.emptyList();
    bitField0_ = (bitField0_ & ~0x00000010);
  } else {
    tableCfsBuilder_.clear();
  }
  namespaces_ = java.util.Collections.emptyList();
  bitField0_ = (bitField0_ & ~0x00000020);
  bandwidth_ = 0L;
  bitField0_ = (bitField0_ & ~0x00000040);
  return this;
}
// Deep copy: a fresh builder merged from a partial build of this one.
public Builder clone() {
  return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
    getDescriptorForType() {
  return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationPeer_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer getDefaultInstanceForType() {
  return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance();
}
/**
 * Builds the message, throwing if any required field (clusterkey) or
 * required sub-message field is unset.
 */
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer build() {
  org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
// Build a message from the current builder state without checking that
// required fields are set. Presence bits are translated from the builder's
// bitField0_ layout to the message's (note bandwidth maps 0x40 -> 0x04,
// since repeated fields carry no presence bit in the message).
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.clusterkey_ = clusterkey_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.replicationEndpointImpl_ = replicationEndpointImpl_;
// For each repeated field: freeze the builder-held list to unmodifiable
// (and drop its mutable bit) so the built message shares it safely, or
// delegate to the field builder when one exists.
if (dataBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
data_ = java.util.Collections.unmodifiableList(data_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.data_ = data_;
} else {
result.data_ = dataBuilder_.build();
}
if (configurationBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
configuration_ = java.util.Collections.unmodifiableList(configuration_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.configuration_ = configuration_;
} else {
result.configuration_ = configurationBuilder_.build();
}
if (tableCfsBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.tableCfs_ = tableCfs_;
} else {
result.tableCfs_ = tableCfsBuilder_.build();
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
namespaces_ = java.util.Collections.unmodifiableList(namespaces_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.namespaces_ = namespaces_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000004;
}
result.bandwidth_ = bandwidth_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Generic merge entry point: dispatch to the typed overload when possible,
// otherwise fall back to reflection-based merging in the superclass.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Field-by-field merge: singular fields are overwritten when set in
// 'other'; repeated fields are appended. For repeated message fields the
// merge path differs depending on whether a field builder exists.
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) return this;
if (other.hasClusterkey()) {
bitField0_ |= 0x00000001;
clusterkey_ = other.clusterkey_;
onChanged();
}
if (other.hasReplicationEndpointImpl()) {
bitField0_ |= 0x00000002;
replicationEndpointImpl_ = other.replicationEndpointImpl_;
onChanged();
}
if (dataBuilder_ == null) {
if (!other.data_.isEmpty()) {
// If our list is still empty we can share other's immutable list
// directly (mutable bit cleared); otherwise copy-on-write and append.
if (data_.isEmpty()) {
data_ = other.data_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureDataIsMutable();
data_.addAll(other.data_);
}
onChanged();
}
} else {
if (!other.data_.isEmpty()) {
// An empty field builder is disposed and recreated over other's list
// to avoid copying element-by-element.
if (dataBuilder_.isEmpty()) {
dataBuilder_.dispose();
dataBuilder_ = null;
data_ = other.data_;
bitField0_ = (bitField0_ & ~0x00000004);
dataBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getDataFieldBuilder() : null;
} else {
dataBuilder_.addAllMessages(other.data_);
}
}
}
// Same merge pattern as 'data' for the configuration field.
if (configurationBuilder_ == null) {
if (!other.configuration_.isEmpty()) {
if (configuration_.isEmpty()) {
configuration_ = other.configuration_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureConfigurationIsMutable();
configuration_.addAll(other.configuration_);
}
onChanged();
}
} else {
if (!other.configuration_.isEmpty()) {
if (configurationBuilder_.isEmpty()) {
configurationBuilder_.dispose();
configurationBuilder_ = null;
configuration_ = other.configuration_;
bitField0_ = (bitField0_ & ~0x00000008);
configurationBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getConfigurationFieldBuilder() : null;
} else {
configurationBuilder_.addAllMessages(other.configuration_);
}
}
}
// Same merge pattern as 'data' for the table_cfs field.
if (tableCfsBuilder_ == null) {
if (!other.tableCfs_.isEmpty()) {
if (tableCfs_.isEmpty()) {
tableCfs_ = other.tableCfs_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureTableCfsIsMutable();
tableCfs_.addAll(other.tableCfs_);
}
onChanged();
}
} else {
if (!other.tableCfs_.isEmpty()) {
if (tableCfsBuilder_.isEmpty()) {
tableCfsBuilder_.dispose();
tableCfsBuilder_ = null;
tableCfs_ = other.tableCfs_;
bitField0_ = (bitField0_ & ~0x00000010);
tableCfsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getTableCfsFieldBuilder() : null;
} else {
tableCfsBuilder_.addAllMessages(other.tableCfs_);
}
}
}
// namespaces is a repeated scalar (bytes) field: no field builder exists,
// so only the list-sharing/append path applies.
if (!other.namespaces_.isEmpty()) {
if (namespaces_.isEmpty()) {
namespaces_ = other.namespaces_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureNamespacesIsMutable();
namespaces_.addAll(other.namespaces_);
}
onChanged();
}
if (other.hasBandwidth()) {
setBandwidth(other.getBandwidth());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// A ReplicationPeer is initialized when the required 'clusterkey' is set
// and every element of the repeated message fields is itself initialized.
public final boolean isInitialized() {
if (!hasClusterkey()) {
return false;
}
for (int i = 0; i < getDataCount(); i++) {
if (!getData(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getConfigurationCount(); i++) {
if (!getConfiguration(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getTableCfsCount(); i++) {
if (!getTableCfs(i).isInitialized()) {
return false;
}
}
return true;
}
// Parse from a wire stream and merge the result into this builder.
// On a parse failure the partially parsed message is still merged in
// (via finally) before the exception propagates, per protobuf semantics.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Presence/mutability bits: 0x01 clusterkey, 0x02 replicationEndpointImpl,
// 0x04 data, 0x08 configuration, 0x10 tableCfs, 0x20 namespaces,
// 0x40 bandwidth.
private int bitField0_;
// required string clusterkey = 1;
// Stored as Object: either a String or a ByteString; converted lazily and
// the decoded form is cached back into the field.
private java.lang.Object clusterkey_ = "";
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
public boolean hasClusterkey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
public java.lang.String getClusterkey() {
java.lang.Object ref = clusterkey_;
if (!(ref instanceof java.lang.String)) {
// Decode the cached ByteString once and memoize the String.
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
clusterkey_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
public com.google.protobuf.ByteString
getClusterkeyBytes() {
java.lang.Object ref = clusterkey_;
if (ref instanceof String) {
// Encode the cached String once and memoize the ByteString.
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
clusterkey_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
public Builder setClusterkey(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clusterkey_ = value;
onChanged();
return this;
}
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
public Builder clearClusterkey() {
bitField0_ = (bitField0_ & ~0x00000001);
clusterkey_ = getDefaultInstance().getClusterkey();
onChanged();
return this;
}
/**
* required string clusterkey = 1;
*
*
* clusterkey is the concatenation of the slave cluster's
* hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
*
*/
public Builder setClusterkeyBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
clusterkey_ = value;
onChanged();
return this;
}
// optional string replicationEndpointImpl = 2;
// Same lazy String/ByteString caching scheme as clusterkey_.
private java.lang.Object replicationEndpointImpl_ = "";
/**
* optional string replicationEndpointImpl = 2;
*/
public boolean hasReplicationEndpointImpl() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional string replicationEndpointImpl = 2;
*/
public java.lang.String getReplicationEndpointImpl() {
java.lang.Object ref = replicationEndpointImpl_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
replicationEndpointImpl_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string replicationEndpointImpl = 2;
*/
public com.google.protobuf.ByteString
getReplicationEndpointImplBytes() {
java.lang.Object ref = replicationEndpointImpl_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
replicationEndpointImpl_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string replicationEndpointImpl = 2;
*/
public Builder setReplicationEndpointImpl(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
replicationEndpointImpl_ = value;
onChanged();
return this;
}
/**
* optional string replicationEndpointImpl = 2;
*/
public Builder clearReplicationEndpointImpl() {
bitField0_ = (bitField0_ & ~0x00000002);
replicationEndpointImpl_ = getDefaultInstance().getReplicationEndpointImpl();
onChanged();
return this;
}
/**
* optional string replicationEndpointImpl = 2;
*/
public Builder setReplicationEndpointImplBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
replicationEndpointImpl_ = value;
onChanged();
return this;
}
// repeated .hbase.pb.BytesBytesPair data = 3;
// NOTE(review): generic type parameters (e.g. List<BytesBytesPair>) appear
// to have been stripped from this listing by HTML extraction; the generated
// source carries them.
private java.util.List data_ =
java.util.Collections.emptyList();
// Copy-on-write: replace the shared/immutable list with a mutable copy the
// first time a mutation is requested (bit 0x04 marks "mutable").
private void ensureDataIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
data_ = new java.util.ArrayList(data_);
bitField0_ |= 0x00000004;
}
}
// Non-null once nested builders are requested; from then on it owns the
// field data and data_ is nulled out (see getDataFieldBuilder()).
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> dataBuilder_;
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public java.util.List getDataList() {
if (dataBuilder_ == null) {
return java.util.Collections.unmodifiableList(data_);
} else {
return dataBuilder_.getMessageList();
}
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public int getDataCount() {
if (dataBuilder_ == null) {
return data_.size();
} else {
return dataBuilder_.getCount();
}
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getData(int index) {
if (dataBuilder_ == null) {
return data_.get(index);
} else {
return dataBuilder_.getMessage(index);
}
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder setData(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) {
if (dataBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDataIsMutable();
data_.set(index, value);
onChanged();
} else {
dataBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder setData(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) {
if (dataBuilder_ == null) {
ensureDataIsMutable();
data_.set(index, builderForValue.build());
onChanged();
} else {
dataBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder addData(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) {
if (dataBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDataIsMutable();
data_.add(value);
onChanged();
} else {
dataBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder addData(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) {
if (dataBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDataIsMutable();
data_.add(index, value);
onChanged();
} else {
dataBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder addData(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) {
if (dataBuilder_ == null) {
ensureDataIsMutable();
data_.add(builderForValue.build());
onChanged();
} else {
dataBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder addData(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) {
if (dataBuilder_ == null) {
ensureDataIsMutable();
data_.add(index, builderForValue.build());
onChanged();
} else {
dataBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder addAllData(
java.lang.Iterable extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> values) {
if (dataBuilder_ == null) {
ensureDataIsMutable();
super.addAll(values, data_);
onChanged();
} else {
dataBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder clearData() {
if (dataBuilder_ == null) {
data_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
dataBuilder_.clear();
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public Builder removeData(int index) {
if (dataBuilder_ == null) {
ensureDataIsMutable();
data_.remove(index);
onChanged();
} else {
dataBuilder_.remove(index);
}
return this;
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getDataBuilder(
int index) {
return getDataFieldBuilder().getBuilder(index);
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getDataOrBuilder(
int index) {
if (dataBuilder_ == null) {
return data_.get(index); } else {
return dataBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>
getDataOrBuilderList() {
if (dataBuilder_ != null) {
return dataBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(data_);
}
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addDataBuilder() {
return getDataFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance());
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addDataBuilder(
int index) {
return getDataFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance());
}
/**
* repeated .hbase.pb.BytesBytesPair data = 3;
*/
public java.util.List
getDataBuilderList() {
return getDataFieldBuilder().getBuilderList();
}
// Lazily create the RepeatedFieldBuilder; ownership of the element list
// transfers to it, so data_ is nulled afterwards.
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>
getDataFieldBuilder() {
if (dataBuilder_ == null) {
dataBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>(
data_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
data_ = null;
}
return dataBuilder_;
}
// repeated .hbase.pb.NameStringPair configuration = 4;
// Accessor group mirrors the 'data' field above: list-backed until a
// RepeatedFieldBuilder is requested, with copy-on-write via bit 0x08.
private java.util.List configuration_ =
java.util.Collections.emptyList();
private void ensureConfigurationIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
configuration_ = new java.util.ArrayList(configuration_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_;
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public java.util.List getConfigurationList() {
if (configurationBuilder_ == null) {
return java.util.Collections.unmodifiableList(configuration_);
} else {
return configurationBuilder_.getMessageList();
}
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public int getConfigurationCount() {
if (configurationBuilder_ == null) {
return configuration_.size();
} else {
return configurationBuilder_.getCount();
}
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) {
if (configurationBuilder_ == null) {
return configuration_.get(index);
} else {
return configurationBuilder_.getMessage(index);
}
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder setConfiguration(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
if (configurationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureConfigurationIsMutable();
configuration_.set(index, value);
onChanged();
} else {
configurationBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder setConfiguration(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
if (configurationBuilder_ == null) {
ensureConfigurationIsMutable();
configuration_.set(index, builderForValue.build());
onChanged();
} else {
configurationBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
if (configurationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureConfigurationIsMutable();
configuration_.add(value);
onChanged();
} else {
configurationBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder addConfiguration(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
if (configurationBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureConfigurationIsMutable();
configuration_.add(index, value);
onChanged();
} else {
configurationBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder addConfiguration(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
if (configurationBuilder_ == null) {
ensureConfigurationIsMutable();
configuration_.add(builderForValue.build());
onChanged();
} else {
configurationBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder addConfiguration(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
if (configurationBuilder_ == null) {
ensureConfigurationIsMutable();
configuration_.add(index, builderForValue.build());
onChanged();
} else {
configurationBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder addAllConfiguration(
java.lang.Iterable extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> values) {
if (configurationBuilder_ == null) {
ensureConfigurationIsMutable();
super.addAll(values, configuration_);
onChanged();
} else {
configurationBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder clearConfiguration() {
if (configurationBuilder_ == null) {
configuration_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
configurationBuilder_.clear();
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public Builder removeConfiguration(int index) {
if (configurationBuilder_ == null) {
ensureConfigurationIsMutable();
configuration_.remove(index);
onChanged();
} else {
configurationBuilder_.remove(index);
}
return this;
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder(
int index) {
return getConfigurationFieldBuilder().getBuilder(index);
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder(
int index) {
if (configurationBuilder_ == null) {
return configuration_.get(index); } else {
return configurationBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>
getConfigurationOrBuilderList() {
if (configurationBuilder_ != null) {
return configurationBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(configuration_);
}
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() {
return getConfigurationFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder(
int index) {
return getConfigurationFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
}
/**
* repeated .hbase.pb.NameStringPair configuration = 4;
*/
public java.util.List
getConfigurationBuilderList() {
return getConfigurationFieldBuilder().getBuilderList();
}
// Lazily create the RepeatedFieldBuilder; it takes over the list, so
// configuration_ is nulled afterwards.
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>
getConfigurationFieldBuilder() {
if (configurationBuilder_ == null) {
configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>(
configuration_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
configuration_ = null;
}
return configurationBuilder_;
}
// repeated .hbase.pb.TableCF table_cfs = 5;
// Accessor group mirrors the 'data' field above: list-backed until a
// RepeatedFieldBuilder is requested, with copy-on-write via bit 0x10.
private java.util.List tableCfs_ =
java.util.Collections.emptyList();
private void ensureTableCfsIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
tableCfs_ = new java.util.ArrayList(tableCfs_);
bitField0_ |= 0x00000010;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder> tableCfsBuilder_;
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public java.util.List getTableCfsList() {
if (tableCfsBuilder_ == null) {
return java.util.Collections.unmodifiableList(tableCfs_);
} else {
return tableCfsBuilder_.getMessageList();
}
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public int getTableCfsCount() {
if (tableCfsBuilder_ == null) {
return tableCfs_.size();
} else {
return tableCfsBuilder_.getCount();
}
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF getTableCfs(int index) {
if (tableCfsBuilder_ == null) {
return tableCfs_.get(index);
} else {
return tableCfsBuilder_.getMessage(index);
}
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder setTableCfs(
int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF value) {
if (tableCfsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTableCfsIsMutable();
tableCfs_.set(index, value);
onChanged();
} else {
tableCfsBuilder_.setMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder setTableCfs(
int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder builderForValue) {
if (tableCfsBuilder_ == null) {
ensureTableCfsIsMutable();
tableCfs_.set(index, builderForValue.build());
onChanged();
} else {
tableCfsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder addTableCfs(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF value) {
if (tableCfsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTableCfsIsMutable();
tableCfs_.add(value);
onChanged();
} else {
tableCfsBuilder_.addMessage(value);
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder addTableCfs(
int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF value) {
if (tableCfsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureTableCfsIsMutable();
tableCfs_.add(index, value);
onChanged();
} else {
tableCfsBuilder_.addMessage(index, value);
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder addTableCfs(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder builderForValue) {
if (tableCfsBuilder_ == null) {
ensureTableCfsIsMutable();
tableCfs_.add(builderForValue.build());
onChanged();
} else {
tableCfsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder addTableCfs(
int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder builderForValue) {
if (tableCfsBuilder_ == null) {
ensureTableCfsIsMutable();
tableCfs_.add(index, builderForValue.build());
onChanged();
} else {
tableCfsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder addAllTableCfs(
java.lang.Iterable extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF> values) {
if (tableCfsBuilder_ == null) {
ensureTableCfsIsMutable();
super.addAll(values, tableCfs_);
onChanged();
} else {
tableCfsBuilder_.addAllMessages(values);
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder clearTableCfs() {
if (tableCfsBuilder_ == null) {
tableCfs_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
tableCfsBuilder_.clear();
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public Builder removeTableCfs(int index) {
if (tableCfsBuilder_ == null) {
ensureTableCfsIsMutable();
tableCfs_.remove(index);
onChanged();
} else {
tableCfsBuilder_.remove(index);
}
return this;
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder getTableCfsBuilder(
int index) {
return getTableCfsFieldBuilder().getBuilder(index);
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder getTableCfsOrBuilder(
int index) {
if (tableCfsBuilder_ == null) {
return tableCfs_.get(index); } else {
return tableCfsBuilder_.getMessageOrBuilder(index);
}
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder>
getTableCfsOrBuilderList() {
if (tableCfsBuilder_ != null) {
return tableCfsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(tableCfs_);
}
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder addTableCfsBuilder() {
return getTableCfsFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.getDefaultInstance());
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder addTableCfsBuilder(
int index) {
return getTableCfsFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.getDefaultInstance());
}
/**
* repeated .hbase.pb.TableCF table_cfs = 5;
*/
public java.util.List
getTableCfsBuilderList() {
return getTableCfsFieldBuilder().getBuilderList();
}
// Lazily create the RepeatedFieldBuilder; it takes over the list, so
// tableCfs_ is nulled afterwards.
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder>
getTableCfsFieldBuilder() {
if (tableCfsBuilder_ == null) {
tableCfsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableCFOrBuilder>(
tableCfs_,
((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
tableCfs_ = null;
}
return tableCfsBuilder_;
}
// repeated bytes namespaces = 6;
private java.util.List namespaces_ = java.util.Collections.emptyList();
private void ensureNamespacesIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
namespaces_ = new java.util.ArrayList(namespaces_);
bitField0_ |= 0x00000020;
}
}
/**
* repeated bytes namespaces = 6;
*/
public java.util.List
getNamespacesList() {
return java.util.Collections.unmodifiableList(namespaces_);
}
/**
* repeated bytes namespaces = 6;
*/
public int getNamespacesCount() {
return namespaces_.size();
}
/**
 * repeated bytes namespaces = 6;
 *
 * Returns the namespace at the given position.
 * Throws IndexOutOfBoundsException if index is out of range.
 */
public com.google.protobuf.ByteString getNamespaces(int index) {
return namespaces_.get(index);
}
/**
 * <code>repeated bytes namespaces = 6;</code>
 *
 * <p>Replaces the namespace at {@code index} with {@code value}.
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder setNamespaces(
    int index, com.google.protobuf.ByteString value) {
  java.util.Objects.requireNonNull(value);
  ensureNamespacesIsMutable();
  namespaces_.set(index, value);
  onChanged();
  return this;
}
/**
 * <code>repeated bytes namespaces = 6;</code>
 *
 * <p>Appends {@code value} to the namespaces list.
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder addNamespaces(com.google.protobuf.ByteString value) {
  java.util.Objects.requireNonNull(value);
  ensureNamespacesIsMutable();
  namespaces_.add(value);
  onChanged();
  return this;
}
/**
* repeated bytes namespaces = 6;
*/
public Builder addAllNamespaces(
java.lang.Iterable extends com.google.protobuf.ByteString> values) {
ensureNamespacesIsMutable();
super.addAll(values, namespaces_);
onChanged();
return this;
}
/**
 * <code>repeated bytes namespaces = 6;</code>
 *
 * <p>Resets the namespaces list to empty and clears its has-bit.
 */
public Builder clearNamespaces() {
  bitField0_ &= ~0x00000020;
  namespaces_ = java.util.Collections.emptyList();
  onChanged();
  return this;
}
// optional int64 bandwidth = 7;
private long bandwidth_;
/**
 * <code>optional int64 bandwidth = 7;</code>
 *
 * <p>True when bandwidth has been explicitly set on this builder.
 */
public boolean hasBandwidth() {
  return (bitField0_ & 0x00000040) != 0;
}
/**
 * optional int64 bandwidth = 7;
 *
 * Returns the bandwidth value, or 0L (the proto default) if unset.
 */
public long getBandwidth() {
return bandwidth_;
}
/**
 * <code>optional int64 bandwidth = 7;</code>
 *
 * <p>Sets the bandwidth and marks the field present.
 */
public Builder setBandwidth(long value) {
  bandwidth_ = value;
  bitField0_ |= 0x00000040;
  onChanged();
  return this;
}
/**
 * <code>optional int64 bandwidth = 7;</code>
 *
 * <p>Clears bandwidth back to its proto default (0) and unsets its has-bit.
 */
public Builder clearBandwidth() {
  bandwidth_ = 0L;
  bitField0_ &= ~0x00000040;
  onChanged();
  return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationPeer)
}
static {
defaultInstance = new ReplicationPeer(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.ReplicationPeer)
}
// Read-only accessor interface for hbase.pb.ReplicationState, implemented by
// both the immutable message and its Builder.
public interface ReplicationStateOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hbase.pb.ReplicationState.State state = 1;
/**
 * required .hbase.pb.ReplicationState.State state = 1;
 *
 * Returns true when the state field has been explicitly set.
 */
boolean hasState();
/**
 * required .hbase.pb.ReplicationState.State state = 1;
 *
 * Returns the replication state; defaults to ENABLED when unset.
 */
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState();
}
/**
* Protobuf type {@code hbase.pb.ReplicationState}
*
*
**
* Used by replication. Holds whether enabled or disabled
*
*/
@javax.annotation.Generated("proto") public static final class ReplicationState extends
com.google.protobuf.GeneratedMessage
implements ReplicationStateOrBuilder {
// Use ReplicationState.newBuilder() to construct.
private ReplicationState(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReplicationState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReplicationState defaultInstance;
public static ReplicationState getDefaultInstance() {
return defaultInstance;
}
public ReplicationState getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReplicationState(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
state_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public ReplicationState parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReplicationState(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code hbase.pb.ReplicationState.State}
*/
public enum State
implements com.google.protobuf.ProtocolMessageEnum {
/**
* ENABLED = 0;
*/
ENABLED(0, 0),
/**
* DISABLED = 1;
*/
DISABLED(1, 1),
;
/**
* ENABLED = 0;
*/
public static final int ENABLED_VALUE = 0;
/**
* DISABLED = 1;
*/
public static final int DISABLED_VALUE = 1;
public final int getNumber() { return value; }
public static State valueOf(int value) {
switch (value) {
case 0: return ENABLED;
case 1: return DISABLED;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap() {
public State findValueByNumber(int number) {
return State.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = values();
public static State valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private State(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hbase.pb.ReplicationState.State)
}
private int bitField0_;
// required .hbase.pb.ReplicationState.State state = 1;
public static final int STATE_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_;
/**
* required .hbase.pb.ReplicationState.State state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.ReplicationState.State state = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() {
return state_;
}
private void initFields() {
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, state_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, state_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) obj;
boolean result = true;
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.ReplicationState}
*
*
**
* Used by replication. Holds whether enabled or disabled
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationStateOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationState_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.state_ = state_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.getDefaultInstance()) return this;
if (other.hasState()) {
setState(other.getState());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasState()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hbase.pb.ReplicationState.State state = 1;
private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
/**
* required .hbase.pb.ReplicationState.State state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hbase.pb.ReplicationState.State state = 1;
*/
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State getState() {
return state_;
}
/**
* required .hbase.pb.ReplicationState.State state = 1;
*/
public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
state_ = value;
onChanged();
return this;
}
/**
* required .hbase.pb.ReplicationState.State state = 1;
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationState.State.ENABLED;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationState)
}
static {
defaultInstance = new ReplicationState(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.ReplicationState)
}
// Read-only accessor interface for hbase.pb.ReplicationHLogPosition,
// implemented by both the immutable message and its Builder.
public interface ReplicationHLogPositionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required int64 position = 1;
/**
 * required int64 position = 1;
 *
 * Returns true when the position field has been explicitly set.
 */
boolean hasPosition();
/**
 * required int64 position = 1;
 *
 * Returns the WAL position; defaults to 0L when unset.
 */
long getPosition();
}
/**
* Protobuf type {@code hbase.pb.ReplicationHLogPosition}
*
*
**
* Used by replication. Holds the current position in an WAL file.
*
*/
@javax.annotation.Generated("proto") public static final class ReplicationHLogPosition extends
com.google.protobuf.GeneratedMessage
implements ReplicationHLogPositionOrBuilder {
// Use ReplicationHLogPosition.newBuilder() to construct.
private ReplicationHLogPosition(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReplicationHLogPosition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReplicationHLogPosition defaultInstance;
public static ReplicationHLogPosition getDefaultInstance() {
return defaultInstance;
}
public ReplicationHLogPosition getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReplicationHLogPosition(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
position_ = input.readInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public ReplicationHLogPosition parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReplicationHLogPosition(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// required int64 position = 1;
public static final int POSITION_FIELD_NUMBER = 1;
private long position_;
/**
* required int64 position = 1;
*/
public boolean hasPosition() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int64 position = 1;
*/
public long getPosition() {
return position_;
}
private void initFields() {
position_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPosition()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeInt64(1, position_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeInt64Size(1, position_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) obj;
boolean result = true;
result = result && (hasPosition() == other.hasPosition());
if (hasPosition()) {
result = result && (getPosition()
== other.getPosition());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPosition()) {
hash = (37 * hash) + POSITION_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getPosition());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.ReplicationHLogPosition}
*
*
**
* Used by replication. Holds the current position in an WAL file.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPositionOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
position_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.position_ = position_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
// Dynamic-dispatch merge: routes to the typed overload when 'other' is a
// ReplicationHLogPosition, otherwise falls back to reflective field merging.
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition)other);
} else {
super.mergeFrom(other);
return this;
}
}
// Typed merge: copies only fields that are set on 'other'; merging from the
// default instance is a no-op.
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition.getDefaultInstance()) return this;
if (other.hasPosition()) {
setPosition(other.getPosition());
}
// Unknown fields are carried over so newer schema data round-trips losslessly.
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
// The message's only required field is 'position'.
public final boolean isInitialized() {
if (!hasPosition()) {
return false;
}
return true;
}
// Stream merge: parses one message from 'input' and merges it into this
// builder. On parse failure the partially-read message is still merged
// (in the finally block) before the exception propagates.
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationHLogPosition) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// Builder has-bits: bit 0 tracks whether 'position' has been explicitly set.
private int bitField0_;
// required int64 position = 1;
private long position_ ;
/**
* required int64 position = 1;
*/
public boolean hasPosition() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required int64 position = 1;
*/
public long getPosition() {
return position_;
}
/**
* required int64 position = 1;
*
* Sets the field, marks it present, and notifies any parent builder.
*/
public Builder setPosition(long value) {
bitField0_ |= 0x00000001;
position_ = value;
onChanged();
return this;
}
/**
* required int64 position = 1;
*
* Clears the has-bit and restores the proto default (0).
*/
public Builder clearPosition() {
bitField0_ = (bitField0_ & ~0x00000001);
position_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.ReplicationHLogPosition)
}
// Eagerly create the shared default instance when the class is loaded.
static {
defaultInstance = new ReplicationHLogPosition(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.ReplicationHLogPosition)
}
// Read-only view of a SwitchState, implemented by both the message and its Builder.
public interface SwitchStateOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional bool enabled = 1;
/**
* optional bool enabled = 1;
*
* True when the field was explicitly set (present on the wire).
*/
boolean hasEnabled();
/**
* optional bool enabled = 1;
*/
boolean getEnabled();
}
/**
* Protobuf type {@code hbase.pb.SwitchState}
*
*
**
* State of the switch.
*
*/
@javax.annotation.Generated("proto") public static final class SwitchState extends
com.google.protobuf.GeneratedMessage
implements SwitchStateOrBuilder {
// Use SwitchState.newBuilder() to construct.
private SwitchState(com.google.protobuf.GeneratedMessage.Builder> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private SwitchState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final SwitchState defaultInstance;
public static SwitchState getDefaultInstance() {
return defaultInstance;
}
public SwitchState getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private SwitchState(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
enabled_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
}
public static com.google.protobuf.Parser PARSER =
new com.google.protobuf.AbstractParser() {
public SwitchState parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new SwitchState(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser getParserForType() {
return PARSER;
}
private int bitField0_;
// optional bool enabled = 1;
public static final int ENABLED_FIELD_NUMBER = 1;
private boolean enabled_;
/**
* optional bool enabled = 1;
*/
public boolean hasEnabled() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bool enabled = 1;
*/
public boolean getEnabled() {
return enabled_;
}
private void initFields() {
enabled_ = false;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBool(1, enabled_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(1, enabled_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) obj;
boolean result = true;
result = result && (hasEnabled() == other.hasEnabled());
if (hasEnabled()) {
result = result && (getEnabled()
== other.getEnabled());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasEnabled()) {
hash = (37 * hash) + ENABLED_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getEnabled());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hbase.pb.SwitchState}
*
*
**
* State of the switch.
*
*/
@javax.annotation.Generated("proto") public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchStateOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
enabled_ = false;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SwitchState_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState build() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.enabled_ = enabled_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState.getDefaultInstance()) return this;
if (other.hasEnabled()) {
setEnabled(other.getEnabled());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional bool enabled = 1;
private boolean enabled_ ;
/**
* optional bool enabled = 1;
*/
public boolean hasEnabled() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* optional bool enabled = 1;
*/
public boolean getEnabled() {
return enabled_;
}
/**
* optional bool enabled = 1;
*/
public Builder setEnabled(boolean value) {
bitField0_ |= 0x00000001;
enabled_ = value;
onChanged();
return this;
}
/**
* optional bool enabled = 1;
*/
public Builder clearEnabled() {
bitField0_ = (bitField0_ & ~0x00000001);
enabled_ = false;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.SwitchState)
}
static {
defaultInstance = new SwitchState(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
}
// Per-message descriptors and reflective field-accessor tables, populated by
// the static initializer below once the file descriptor is built.
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_MetaRegionServer_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_Master_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_Master_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ClusterUp_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_ClusterUp_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_SplitLogTask_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_SplitLogTask_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_DeprecatedTableState_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_TableCF_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_TableCF_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ReplicationPeer_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ReplicationState_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_ReplicationState_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_ReplicationHLogPosition_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_SwitchState_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_SwitchState_fieldAccessorTable;
// Returns the file-level descriptor for ZooKeeper.proto.
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
// Serialized FileDescriptorProto for ZooKeeper.proto, emitted by protoc as
// escaped string chunks. Binary data: must not be edited by hand.
java.lang.String[] descriptorData = {
"\n\017ZooKeeper.proto\022\010hbase.pb\032\013Table.proto" +
"\032\013HBase.proto\032\023ClusterStatus.proto\"y\n\020Me" +
"taRegionServer\022$\n\006server\030\001 \002(\0132\024.hbase.p" +
"b.ServerName\022\023\n\013rpc_version\030\002 \001(\r\022*\n\005sta" +
"te\030\003 \001(\0162\033.hbase.pb.RegionState.State\"V\n" +
"\006Master\022$\n\006master\030\001 \002(\0132\024.hbase.pb.Serve" +
"rName\022\023\n\013rpc_version\030\002 \001(\r\022\021\n\tinfo_port\030" +
"\003 \001(\r\"\037\n\tClusterUp\022\022\n\nstart_date\030\001 \002(\t\"\253" +
"\001\n\014SplitLogTask\022+\n\005state\030\001 \002(\0162\034.hbase.p" +
"b.SplitLogTask.State\022)\n\013server_name\030\002 \002(",
"\0132\024.hbase.pb.ServerName\"C\n\005State\022\016\n\nUNAS" +
"SIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DON" +
"E\020\003\022\007\n\003ERR\020\004\"\225\001\n\024DeprecatedTableState\022<\n" +
"\005state\030\001 \002(\0162$.hbase.pb.DeprecatedTableS" +
"tate.State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020" +
"\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLI" +
"NG\020\003\"D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.hb" +
"ase.pb.TableName\022\020\n\010families\030\002 \003(\014\"\354\001\n\017R" +
"eplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027re" +
"plicationEndpointImpl\030\002 \001(\t\022&\n\004data\030\003 \003(",
"\0132\030.hbase.pb.BytesBytesPair\022/\n\rconfigura" +
"tion\030\004 \003(\0132\030.hbase.pb.NameStringPair\022$\n\t" +
"table_cfs\030\005 \003(\0132\021.hbase.pb.TableCF\022\022\n\nna" +
"mespaces\030\006 \003(\014\022\021\n\tbandwidth\030\007 \001(\003\"g\n\020Rep" +
"licationState\022/\n\005state\030\001 \002(\0162 .hbase.pb." +
"ReplicationState.State\"\"\n\005State\022\013\n\007ENABL" +
"ED\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPos" +
"ition\022\020\n\010position\030\001 \002(\003\"\036\n\013SwitchState\022\017" +
"\n\007enabled\030\001 \001(\010BE\n*org.apache.hadoop.hba" +
"se.protobuf.generatedB\017ZooKeeperProtosH\001",
"\210\001\001\240\001\001"
};
// Callback invoked once the descriptor is built: wires each message's
// descriptor (by declaration index in the .proto) to its accessor table.
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_hbase_pb_MetaRegionServer_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hbase_pb_MetaRegionServer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_MetaRegionServer_descriptor,
new java.lang.String[] { "Server", "RpcVersion", "State", });
internal_static_hbase_pb_Master_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hbase_pb_Master_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_Master_descriptor,
new java.lang.String[] { "Master", "RpcVersion", "InfoPort", });
internal_static_hbase_pb_ClusterUp_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hbase_pb_ClusterUp_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ClusterUp_descriptor,
new java.lang.String[] { "StartDate", });
internal_static_hbase_pb_SplitLogTask_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hbase_pb_SplitLogTask_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SplitLogTask_descriptor,
new java.lang.String[] { "State", "ServerName", });
internal_static_hbase_pb_DeprecatedTableState_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DeprecatedTableState_descriptor,
new java.lang.String[] { "State", });
internal_static_hbase_pb_TableCF_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hbase_pb_TableCF_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TableCF_descriptor,
new java.lang.String[] { "TableName", "Families", });
internal_static_hbase_pb_ReplicationPeer_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hbase_pb_ReplicationPeer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ReplicationPeer_descriptor,
new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", "TableCfs", "Namespaces", "Bandwidth", });
internal_static_hbase_pb_ReplicationState_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hbase_pb_ReplicationState_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ReplicationState_descriptor,
new java.lang.String[] { "State", });
internal_static_hbase_pb_ReplicationHLogPosition_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ReplicationHLogPosition_descriptor,
new java.lang.String[] { "Position", });
internal_static_hbase_pb_SwitchState_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hbase_pb_SwitchState_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SwitchState_descriptor,
new java.lang.String[] { "Enabled", });
// No extensions to register for this file.
return null;
}
};
// Build the descriptor, resolving imports against the dependencies'
// already-built file descriptors (Table.proto, HBase.proto, ClusterStatus.proto).
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.TableProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}