// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: HAServiceProtocol.proto
package org.apache.hadoop.ha.proto;
public final class HAServiceProtocolProtos {
private HAServiceProtocolProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
* Protobuf enum {@code hadoop.common.HAServiceStateProto}
*/
public enum HAServiceStateProto
implements com.google.protobuf.ProtocolMessageEnum {
/**
* INITIALIZING = 0;
*/
INITIALIZING(0, 0),
/**
* ACTIVE = 1;
*/
ACTIVE(1, 1),
/**
* STANDBY = 2;
*/
STANDBY(2, 2),
/**
* OBSERVER = 3;
*/
OBSERVER(3, 3),
;
/**
* INITIALIZING = 0;
*/
public static final int INITIALIZING_VALUE = 0;
/**
* ACTIVE = 1;
*/
public static final int ACTIVE_VALUE = 1;
/**
* STANDBY = 2;
*/
public static final int STANDBY_VALUE = 2;
/**
* OBSERVER = 3;
*/
public static final int OBSERVER_VALUE = 3;
public final int getNumber() { return value; }
public static HAServiceStateProto valueOf(int value) {
switch (value) {
case 0: return INITIALIZING;
case 1: return ACTIVE;
case 2: return STANDBY;
case 3: return OBSERVER;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<HAServiceStateProto>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<HAServiceStateProto>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<HAServiceStateProto>() {
public HAServiceStateProto findValueByNumber(int number) {
return HAServiceStateProto.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.getDescriptor().getEnumTypes().get(0);
}
private static final HAServiceStateProto[] VALUES = values();
public static HAServiceStateProto valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private HAServiceStateProto(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.common.HAServiceStateProto)
}
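// Usage sketch (illustrative comment, not part of the protoc output): the enum converts
// between Java constants and proto wire numbers via getNumber() and valueOf(int); valueOf
// returns null for numbers that HAServiceProtocol.proto does not define.
//
//   int wire = HAServiceStateProto.ACTIVE.getNumber();              // 1
//   HAServiceStateProto state = HAServiceStateProto.valueOf(wire);  // ACTIVE
//   HAServiceStateProto unknown = HAServiceStateProto.valueOf(99);  // null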
/**
* Protobuf enum {@code hadoop.common.HARequestSource}
*/
public enum HARequestSource
implements com.google.protobuf.ProtocolMessageEnum {
/**
* REQUEST_BY_USER = 0;
*/
REQUEST_BY_USER(0, 0),
/**
* REQUEST_BY_USER_FORCED = 1;
*/
REQUEST_BY_USER_FORCED(1, 1),
/**
* REQUEST_BY_ZKFC = 2;
*/
REQUEST_BY_ZKFC(2, 2),
;
/**
* REQUEST_BY_USER = 0;
*/
public static final int REQUEST_BY_USER_VALUE = 0;
/**
* REQUEST_BY_USER_FORCED = 1;
*/
public static final int REQUEST_BY_USER_FORCED_VALUE = 1;
/**
* REQUEST_BY_ZKFC = 2;
*/
public static final int REQUEST_BY_ZKFC_VALUE = 2;
public final int getNumber() { return value; }
public static HARequestSource valueOf(int value) {
switch (value) {
case 0: return REQUEST_BY_USER;
case 1: return REQUEST_BY_USER_FORCED;
case 2: return REQUEST_BY_ZKFC;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<HARequestSource>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<HARequestSource>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<HARequestSource>() {
public HARequestSource findValueByNumber(int number) {
return HARequestSource.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.getDescriptor().getEnumTypes().get(1);
}
private static final HARequestSource[] VALUES = values();
public static HARequestSource valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private HARequestSource(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:hadoop.common.HARequestSource)
}
public interface HAStateChangeRequestInfoProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.common.HARequestSource reqSource = 1;
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
boolean hasReqSource();
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource getReqSource();
}
/**
* Protobuf type {@code hadoop.common.HAStateChangeRequestInfoProto}
*/
public static final class HAStateChangeRequestInfoProto extends
com.google.protobuf.GeneratedMessage
implements HAStateChangeRequestInfoProtoOrBuilder {
// Use HAStateChangeRequestInfoProto.newBuilder() to construct.
private HAStateChangeRequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private HAStateChangeRequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final HAStateChangeRequestInfoProto defaultInstance;
public static HAStateChangeRequestInfoProto getDefaultInstance() {
return defaultInstance;
}
public HAStateChangeRequestInfoProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private HAStateChangeRequestInfoProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource value = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
reqSource_ = value;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_HAStateChangeRequestInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_HAStateChangeRequestInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder.class);
}
public static com.google.protobuf.Parser<HAStateChangeRequestInfoProto> PARSER =
new com.google.protobuf.AbstractParser<HAStateChangeRequestInfoProto>() {
public HAStateChangeRequestInfoProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new HAStateChangeRequestInfoProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<HAStateChangeRequestInfoProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.common.HARequestSource reqSource = 1;
public static final int REQSOURCE_FIELD_NUMBER = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource reqSource_;
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
public boolean hasReqSource() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource getReqSource() {
return reqSource_;
}
private void initFields() {
reqSource_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource.REQUEST_BY_USER;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasReqSource()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, reqSource_.getNumber());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, reqSource_.getNumber());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto) obj;
boolean result = true;
result = result && (hasReqSource() == other.hasReqSource());
if (hasReqSource()) {
result = result &&
(getReqSource() == other.getReqSource());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasReqSource()) {
hash = (37 * hash) + REQSOURCE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getReqSource());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.HAStateChangeRequestInfoProto}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_HAStateChangeRequestInfoProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_HAStateChangeRequestInfoProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
reqSource_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource.REQUEST_BY_USER;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_HAStateChangeRequestInfoProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.reqSource_ = reqSource_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance()) return this;
if (other.hasReqSource()) {
setReqSource(other.getReqSource());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasReqSource()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.common.HARequestSource reqSource = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource reqSource_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource.REQUEST_BY_USER;
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
public boolean hasReqSource() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource getReqSource() {
return reqSource_;
}
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
public Builder setReqSource(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
reqSource_ = value;
onChanged();
return this;
}
/**
* required .hadoop.common.HARequestSource reqSource = 1;
*/
public Builder clearReqSource() {
bitField0_ = (bitField0_ & ~0x00000001);
reqSource_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource.REQUEST_BY_USER;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.HAStateChangeRequestInfoProto)
}
static {
defaultInstance = new HAStateChangeRequestInfoProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.HAStateChangeRequestInfoProto)
}
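// Usage sketch (illustrative comment, not part of the protoc output): build, serialize and
// re-parse an HAStateChangeRequestInfoProto with the builder and parser above. reqSource is
// a required field, so build() throws an UninitializedMessageException if it was never set.
//
//   HAStateChangeRequestInfoProto reqInfo = HAStateChangeRequestInfoProto.newBuilder()
//       .setReqSource(HARequestSource.REQUEST_BY_ZKFC)
//       .build();
//   byte[] bytes = reqInfo.toByteArray();
//   HAStateChangeRequestInfoProto parsed = HAStateChangeRequestInfoProto.parseFrom(bytes);
//   assert parsed.getReqSource() == HARequestSource.REQUEST_BY_ZKFC;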
public interface MonitorHealthRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.common.MonitorHealthRequestProto}
*
* <pre>
* void request
* </pre>
*/
public static final class MonitorHealthRequestProto extends
com.google.protobuf.GeneratedMessage
implements MonitorHealthRequestProtoOrBuilder {
// Use MonitorHealthRequestProto.newBuilder() to construct.
private MonitorHealthRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private MonitorHealthRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final MonitorHealthRequestProto defaultInstance;
public static MonitorHealthRequestProto getDefaultInstance() {
return defaultInstance;
}
public MonitorHealthRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MonitorHealthRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<MonitorHealthRequestProto> PARSER =
new com.google.protobuf.AbstractParser<MonitorHealthRequestProto>() {
public MonitorHealthRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MonitorHealthRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<MonitorHealthRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.MonitorHealthRequestProto}
*
* <pre>
* void request
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthRequestProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.MonitorHealthRequestProto)
}
static {
defaultInstance = new MonitorHealthRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.MonitorHealthRequestProto)
}
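// Usage sketch (illustrative comment): MonitorHealthRequestProto carries no fields; it is
// the "void request" payload described in the Javadoc above. A freshly built instance and
// the default instance are equivalent on the wire.
//
//   MonitorHealthRequestProto req = MonitorHealthRequestProto.newBuilder().build();
//   // equivalently: MonitorHealthRequestProto.getDefaultInstance()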
public interface MonitorHealthResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.common.MonitorHealthResponseProto}
*
* <pre>
* void response
* </pre>
*/
public static final class MonitorHealthResponseProto extends
com.google.protobuf.GeneratedMessage
implements MonitorHealthResponseProtoOrBuilder {
// Use MonitorHealthResponseProto.newBuilder() to construct.
private MonitorHealthResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private MonitorHealthResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final MonitorHealthResponseProto defaultInstance;
public static MonitorHealthResponseProto getDefaultInstance() {
return defaultInstance;
}
public MonitorHealthResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MonitorHealthResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<MonitorHealthResponseProto> PARSER =
new com.google.protobuf.AbstractParser<MonitorHealthResponseProto>() {
public MonitorHealthResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MonitorHealthResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<MonitorHealthResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.MonitorHealthResponseProto}
*
* <pre>
* void response
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_MonitorHealthResponseProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.MonitorHealthResponseProto)
}
static {
defaultInstance = new MonitorHealthResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.MonitorHealthResponseProto)
}
public interface TransitionToActiveRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
boolean hasReqInfo();
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo();
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder();
}
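// Usage sketch (illustrative comment): reqInfo is itself a message, so a transition request
// is built by nesting the HAStateChangeRequestInfoProto defined earlier inside this builder.
//
//   TransitionToActiveRequestProto req = TransitionToActiveRequestProto.newBuilder()
//       .setReqInfo(HAStateChangeRequestInfoProto.newBuilder()
//           .setReqSource(HARequestSource.REQUEST_BY_USER)
//           .build())
//       .build();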
/**
* Protobuf type {@code hadoop.common.TransitionToActiveRequestProto}
*
* <pre>
* void request
* </pre>
*/
public static final class TransitionToActiveRequestProto extends
com.google.protobuf.GeneratedMessage
implements TransitionToActiveRequestProtoOrBuilder {
// Use TransitionToActiveRequestProto.newBuilder() to construct.
private TransitionToActiveRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TransitionToActiveRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TransitionToActiveRequestProto defaultInstance;
public static TransitionToActiveRequestProto getDefaultInstance() {
return defaultInstance;
}
public TransitionToActiveRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TransitionToActiveRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = reqInfo_.toBuilder();
}
reqInfo_ = input.readMessage(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(reqInfo_);
reqInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<TransitionToActiveRequestProto> PARSER =
new com.google.protobuf.AbstractParser<TransitionToActiveRequestProto>() {
public TransitionToActiveRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TransitionToActiveRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TransitionToActiveRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
public static final int REQINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo_;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public boolean hasReqInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo() {
return reqInfo_;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder() {
return reqInfo_;
}
private void initFields() {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasReqInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getReqInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, reqInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, reqInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto) obj;
boolean result = true;
result = result && (hasReqInfo() == other.hasReqInfo());
if (hasReqInfo()) {
result = result && getReqInfo()
.equals(other.getReqInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasReqInfo()) {
hash = (37 * hash) + REQINFO_FIELD_NUMBER;
hash = (53 * hash) + getReqInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.TransitionToActiveRequestProto}
*
* <pre>
* void request
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getReqInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (reqInfoBuilder_ == null) {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
} else {
reqInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveRequestProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (reqInfoBuilder_ == null) {
result.reqInfo_ = reqInfo_;
} else {
result.reqInfo_ = reqInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.getDefaultInstance()) return this;
if (other.hasReqInfo()) {
mergeReqInfo(other.getReqInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasReqInfo()) {
return false;
}
if (!getReqInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder> reqInfoBuilder_;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public boolean hasReqInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo() {
if (reqInfoBuilder_ == null) {
return reqInfo_;
} else {
return reqInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder setReqInfo(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto value) {
if (reqInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
reqInfo_ = value;
onChanged();
} else {
reqInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder setReqInfo(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder builderForValue) {
if (reqInfoBuilder_ == null) {
reqInfo_ = builderForValue.build();
onChanged();
} else {
reqInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder mergeReqInfo(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto value) {
if (reqInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
reqInfo_ != org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance()) {
reqInfo_ =
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
} else {
reqInfo_ = value;
}
onChanged();
} else {
reqInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder clearReqInfo() {
if (reqInfoBuilder_ == null) {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
onChanged();
} else {
reqInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder getReqInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getReqInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder() {
if (reqInfoBuilder_ != null) {
return reqInfoBuilder_.getMessageOrBuilder();
} else {
return reqInfo_;
}
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder>
getReqInfoFieldBuilder() {
if (reqInfoBuilder_ == null) {
reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder>(
reqInfo_,
getParentForChildren(),
isClean());
reqInfo_ = null;
}
return reqInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.TransitionToActiveRequestProto)
}
static {
defaultInstance = new TransitionToActiveRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.TransitionToActiveRequestProto)
}
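// Illustrative usage (editor's sketch, not part of the generated source): constructing a
// TransitionToActiveRequestProto through the Builder above. The setReqSource(...) call on
// HAStateChangeRequestInfoProto.Builder is assumed to exist elsewhere in this file and is
// not shown in this excerpt.
//
//   HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo =
//       HAServiceProtocolProtos.HAStateChangeRequestInfoProto.newBuilder()
//           .setReqSource(HAServiceProtocolProtos.HARequestSource.REQUEST_BY_USER)
//           .build();
//   HAServiceProtocolProtos.TransitionToActiveRequestProto request =
//       HAServiceProtocolProtos.TransitionToActiveRequestProto.newBuilder()
//           .setReqInfo(reqInfo)
//           .build();   // build() would throw if the required reqInfo field were left unset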
public interface TransitionToActiveResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.common.TransitionToActiveResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class TransitionToActiveResponseProto extends
com.google.protobuf.GeneratedMessage
implements TransitionToActiveResponseProtoOrBuilder {
// Use TransitionToActiveResponseProto.newBuilder() to construct.
private TransitionToActiveResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TransitionToActiveResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TransitionToActiveResponseProto defaultInstance;
public static TransitionToActiveResponseProto getDefaultInstance() {
return defaultInstance;
}
public TransitionToActiveResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TransitionToActiveResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<TransitionToActiveResponseProto> PARSER =
new com.google.protobuf.AbstractParser<TransitionToActiveResponseProto>() {
public TransitionToActiveResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TransitionToActiveResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TransitionToActiveResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.TransitionToActiveResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToActiveResponseProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.TransitionToActiveResponseProto)
}
static {
defaultInstance = new TransitionToActiveResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.TransitionToActiveResponseProto)
}
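// Illustrative round-trip (editor's sketch, not part of the generated source):
// TransitionToActiveResponseProto declares no fields, so serialization only carries the
// unknown-field set; the static parse helpers above recover an instance from a stream.
//
//   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
//   HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance()
//       .writeDelimitedTo(out);   // writeDelimitedTo is inherited from MessageLite
//   HAServiceProtocolProtos.TransitionToActiveResponseProto reply =
//       HAServiceProtocolProtos.TransitionToActiveResponseProto.parseDelimitedFrom(
//           new java.io.ByteArrayInputStream(out.toByteArray()));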
public interface TransitionToStandbyRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
boolean hasReqInfo();
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo();
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.common.TransitionToStandbyRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class TransitionToStandbyRequestProto extends
com.google.protobuf.GeneratedMessage
implements TransitionToStandbyRequestProtoOrBuilder {
// Use TransitionToStandbyRequestProto.newBuilder() to construct.
private TransitionToStandbyRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TransitionToStandbyRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TransitionToStandbyRequestProto defaultInstance;
public static TransitionToStandbyRequestProto getDefaultInstance() {
return defaultInstance;
}
public TransitionToStandbyRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TransitionToStandbyRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = reqInfo_.toBuilder();
}
reqInfo_ = input.readMessage(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(reqInfo_);
reqInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<TransitionToStandbyRequestProto> PARSER =
new com.google.protobuf.AbstractParser<TransitionToStandbyRequestProto>() {
public TransitionToStandbyRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TransitionToStandbyRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TransitionToStandbyRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
public static final int REQINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo_;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public boolean hasReqInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo() {
return reqInfo_;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder() {
return reqInfo_;
}
private void initFields() {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasReqInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getReqInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, reqInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, reqInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto) obj;
boolean result = true;
result = result && (hasReqInfo() == other.hasReqInfo());
if (hasReqInfo()) {
result = result && getReqInfo()
.equals(other.getReqInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasReqInfo()) {
hash = (37 * hash) + REQINFO_FIELD_NUMBER;
hash = (53 * hash) + getReqInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.TransitionToStandbyRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getReqInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (reqInfoBuilder_ == null) {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
} else {
reqInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyRequestProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (reqInfoBuilder_ == null) {
result.reqInfo_ = reqInfo_;
} else {
result.reqInfo_ = reqInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.getDefaultInstance()) return this;
if (other.hasReqInfo()) {
mergeReqInfo(other.getReqInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasReqInfo()) {
return false;
}
if (!getReqInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder> reqInfoBuilder_;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public boolean hasReqInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo() {
if (reqInfoBuilder_ == null) {
return reqInfo_;
} else {
return reqInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder setReqInfo(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto value) {
if (reqInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
reqInfo_ = value;
onChanged();
} else {
reqInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder setReqInfo(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder builderForValue) {
if (reqInfoBuilder_ == null) {
reqInfo_ = builderForValue.build();
onChanged();
} else {
reqInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder mergeReqInfo(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto value) {
if (reqInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
reqInfo_ != org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance()) {
reqInfo_ =
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
} else {
reqInfo_ = value;
}
onChanged();
} else {
reqInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder clearReqInfo() {
if (reqInfoBuilder_ == null) {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
onChanged();
} else {
reqInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder getReqInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getReqInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder() {
if (reqInfoBuilder_ != null) {
return reqInfoBuilder_.getMessageOrBuilder();
} else {
return reqInfo_;
}
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder>
getReqInfoFieldBuilder() {
if (reqInfoBuilder_ == null) {
reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder>(
reqInfo_,
getParentForChildren(),
isClean());
reqInfo_ = null;
}
return reqInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.TransitionToStandbyRequestProto)
}
static {
defaultInstance = new TransitionToStandbyRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.TransitionToStandbyRequestProto)
}
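// Illustrative copy-and-modify (editor's sketch, not part of the generated source): generated
// messages are immutable, so toBuilder() plus the mergeReqInfo(...) method above is the
// supported way to derive an adjusted TransitionToStandbyRequestProto. The locals "original"
// and "otherReqInfo" are hypothetical placeholders of the corresponding message types.
//
//   HAServiceProtocolProtos.TransitionToStandbyRequestProto copy =
//       original.toBuilder()
//           .mergeReqInfo(otherReqInfo)   // merges field-by-field into the existing reqInfo
//           .build();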
public interface TransitionToStandbyResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.common.TransitionToStandbyResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class TransitionToStandbyResponseProto extends
com.google.protobuf.GeneratedMessage
implements TransitionToStandbyResponseProtoOrBuilder {
// Use TransitionToStandbyResponseProto.newBuilder() to construct.
private TransitionToStandbyResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TransitionToStandbyResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TransitionToStandbyResponseProto defaultInstance;
public static TransitionToStandbyResponseProto getDefaultInstance() {
return defaultInstance;
}
public TransitionToStandbyResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TransitionToStandbyResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<TransitionToStandbyResponseProto> PARSER =
new com.google.protobuf.AbstractParser<TransitionToStandbyResponseProto>() {
public TransitionToStandbyResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TransitionToStandbyResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TransitionToStandbyResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.TransitionToStandbyResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToStandbyResponseProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.TransitionToStandbyResponseProto)
}
static {
defaultInstance = new TransitionToStandbyResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.TransitionToStandbyResponseProto)
}
public interface TransitionToObserverRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
boolean hasReqInfo();
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo();
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder();
}
/**
* Protobuf type {@code hadoop.common.TransitionToObserverRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class TransitionToObserverRequestProto extends
com.google.protobuf.GeneratedMessage
implements TransitionToObserverRequestProtoOrBuilder {
// Use TransitionToObserverRequestProto.newBuilder() to construct.
private TransitionToObserverRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TransitionToObserverRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TransitionToObserverRequestProto defaultInstance;
public static TransitionToObserverRequestProto getDefaultInstance() {
return defaultInstance;
}
public TransitionToObserverRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TransitionToObserverRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = reqInfo_.toBuilder();
}
reqInfo_ = input.readMessage(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(reqInfo_);
reqInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<TransitionToObserverRequestProto> PARSER =
new com.google.protobuf.AbstractParser<TransitionToObserverRequestProto>() {
public TransitionToObserverRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TransitionToObserverRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TransitionToObserverRequestProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
public static final int REQINFO_FIELD_NUMBER = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo_;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public boolean hasReqInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo() {
return reqInfo_;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder() {
return reqInfo_;
}
private void initFields() {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasReqInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!getReqInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, reqInfo_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, reqInfo_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto) obj;
boolean result = true;
result = result && (hasReqInfo() == other.hasReqInfo());
if (hasReqInfo()) {
result = result && getReqInfo()
.equals(other.getReqInfo());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasReqInfo()) {
hash = (37 * hash) + REQINFO_FIELD_NUMBER;
hash = (53 * hash) + getReqInfo().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.TransitionToObserverRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getReqInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (reqInfoBuilder_ == null) {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
} else {
reqInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverRequestProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (reqInfoBuilder_ == null) {
result.reqInfo_ = reqInfo_;
} else {
result.reqInfo_ = reqInfoBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.getDefaultInstance()) return this;
if (other.hasReqInfo()) {
mergeReqInfo(other.getReqInfo());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasReqInfo()) {
return false;
}
if (!getReqInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder> reqInfoBuilder_;
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public boolean hasReqInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto getReqInfo() {
if (reqInfoBuilder_ == null) {
return reqInfo_;
} else {
return reqInfoBuilder_.getMessage();
}
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder setReqInfo(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto value) {
if (reqInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
reqInfo_ = value;
onChanged();
} else {
reqInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder setReqInfo(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder builderForValue) {
if (reqInfoBuilder_ == null) {
reqInfo_ = builderForValue.build();
onChanged();
} else {
reqInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder mergeReqInfo(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto value) {
if (reqInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
reqInfo_ != org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance()) {
reqInfo_ =
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
} else {
reqInfo_ = value;
}
onChanged();
} else {
reqInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public Builder clearReqInfo() {
if (reqInfoBuilder_ == null) {
reqInfo_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.getDefaultInstance();
onChanged();
} else {
reqInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder getReqInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getReqInfoFieldBuilder().getBuilder();
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder getReqInfoOrBuilder() {
if (reqInfoBuilder_ != null) {
return reqInfoBuilder_.getMessageOrBuilder();
} else {
return reqInfo_;
}
}
/**
* required .hadoop.common.HAStateChangeRequestInfoProto reqInfo = 1;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder>
getReqInfoFieldBuilder() {
if (reqInfoBuilder_ == null) {
reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto.Builder, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProtoOrBuilder>(
reqInfo_,
getParentForChildren(),
isClean());
reqInfo_ = null;
}
return reqInfoBuilder_;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.TransitionToObserverRequestProto)
}
static {
defaultInstance = new TransitionToObserverRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.TransitionToObserverRequestProto)
}
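// Illustrative sketch, not part of the generated protocol code: a hypothetical
// helper showing how TransitionToObserverRequestProto is typically assembled via
// its Builder and round-tripped through the generated parser. The reqSource
// setter on HAStateChangeRequestInfoProto is assumed from HAServiceProtocol.proto.
private static TransitionToObserverRequestProto exampleTransitionToObserverRequestRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  HAStateChangeRequestInfoProto reqInfo = HAStateChangeRequestInfoProto.newBuilder()
      .setReqSource(HARequestSource.REQUEST_BY_USER) // required field of the nested message
      .build();
  TransitionToObserverRequestProto request = TransitionToObserverRequestProto.newBuilder()
      .setReqInfo(reqInfo)                           // required reqInfo = 1
      .build();                                      // build() throws if isInitialized() is false
  byte[] bytes = request.toByteArray();              // uses writeTo()/getSerializedSize()
  return TransitionToObserverRequestProto.parseFrom(bytes);
}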
public interface TransitionToObserverResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.common.TransitionToObserverResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class TransitionToObserverResponseProto extends
com.google.protobuf.GeneratedMessage
implements TransitionToObserverResponseProtoOrBuilder {
// Use TransitionToObserverResponseProto.newBuilder() to construct.
private TransitionToObserverResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private TransitionToObserverResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final TransitionToObserverResponseProto defaultInstance;
public static TransitionToObserverResponseProto getDefaultInstance() {
return defaultInstance;
}
public TransitionToObserverResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private TransitionToObserverResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<TransitionToObserverResponseProto> PARSER =
new com.google.protobuf.AbstractParser<TransitionToObserverResponseProto>() {
public TransitionToObserverResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new TransitionToObserverResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<TransitionToObserverResponseProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.TransitionToObserverResponseProto}
*
* <pre>
**
* void response
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_TransitionToObserverResponseProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.TransitionToObserverResponseProto)
}
static {
defaultInstance = new TransitionToObserverResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.TransitionToObserverResponseProto)
}
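// Illustrative sketch, not part of the generated protocol code: the
// transitionToObserver response is a void message, so it carries no fields and a
// freshly built instance serializes to zero bytes; parsing an empty payload just
// reproduces the default instance.
private static TransitionToObserverResponseProto exampleVoidResponseRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  TransitionToObserverResponseProto response =
      TransitionToObserverResponseProto.newBuilder().build(); // nothing to set
  assert response.getSerializedSize() == 0;                   // empty message, no unknown fields
  return TransitionToObserverResponseProto.parseFrom(new byte[0]);
}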
public interface GetServiceStatusRequestProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
/**
* Protobuf type {@code hadoop.common.GetServiceStatusRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class GetServiceStatusRequestProto extends
com.google.protobuf.GeneratedMessage
implements GetServiceStatusRequestProtoOrBuilder {
// Use GetServiceStatusRequestProto.newBuilder() to construct.
private GetServiceStatusRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetServiceStatusRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetServiceStatusRequestProto defaultInstance;
public static GetServiceStatusRequestProto getDefaultInstance() {
return defaultInstance;
}
public GetServiceStatusRequestProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetServiceStatusRequestProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.Builder.class);
}
public static com.google.protobuf.Parser<GetServiceStatusRequestProto> PARSER =
new com.google.protobuf.AbstractParser<GetServiceStatusRequestProto>() {
public GetServiceStatusRequestProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetServiceStatusRequestProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetServiceStatusRequestProto> getParserForType() {
return PARSER;
}
private void initFields() {
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto) obj;
boolean result = true;
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.GetServiceStatusRequestProto}
*
* <pre>
**
* void request
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusRequestProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusRequestProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusRequestProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto(this);
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.getDefaultInstance()) return this;
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.GetServiceStatusRequestProto)
}
static {
defaultInstance = new GetServiceStatusRequestProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.GetServiceStatusRequestProto)
}
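// Illustrative sketch, not part of the generated protocol code: demonstrates the
// hasX()/getX() pattern on the GetServiceStatusResponseProto defined below, where
// readyToBecomeActive and notReadyReason are optional and only meaningful when
// their presence bits are set. The formatting here is made up for demonstration.
private static java.lang.String exampleDescribeServiceStatus(
    GetServiceStatusResponseProto status) {
  StringBuilder sb = new StringBuilder(status.getState().name()); // required state = 1
  if (status.hasReadyToBecomeActive() && !status.getReadyToBecomeActive()
      && status.hasNotReadyReason()) {
    sb.append(" (not ready: ").append(status.getNotReadyReason()).append(')');
  }
  return sb.toString();
}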
public interface GetServiceStatusResponseProtoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .hadoop.common.HAServiceStateProto state = 1;
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
boolean hasState();
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto getState();
// optional bool readyToBecomeActive = 2;
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
boolean hasReadyToBecomeActive();
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
boolean getReadyToBecomeActive();
// optional string notReadyReason = 3;
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
boolean hasNotReadyReason();
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
java.lang.String getNotReadyReason();
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
com.google.protobuf.ByteString
getNotReadyReasonBytes();
}
/**
* Protobuf type {@code hadoop.common.GetServiceStatusResponseProto}
*
* <pre>
**
* Returns the state of the service
* </pre>
*/
public static final class GetServiceStatusResponseProto extends
com.google.protobuf.GeneratedMessage
implements GetServiceStatusResponseProtoOrBuilder {
// Use GetServiceStatusResponseProto.newBuilder() to construct.
private GetServiceStatusResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private GetServiceStatusResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final GetServiceStatusResponseProto defaultInstance;
public static GetServiceStatusResponseProto getDefaultInstance() {
return defaultInstance;
}
public GetServiceStatusResponseProto getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private GetServiceStatusResponseProto(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
int rawValue = input.readEnum();
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto value = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(1, rawValue);
} else {
bitField0_ |= 0x00000001;
state_ = value;
}
break;
}
case 16: {
bitField0_ |= 0x00000002;
readyToBecomeActive_ = input.readBool();
break;
}
case 26: {
bitField0_ |= 0x00000004;
notReadyReason_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.Builder.class);
}
public static com.google.protobuf.Parser<GetServiceStatusResponseProto> PARSER =
new com.google.protobuf.AbstractParser<GetServiceStatusResponseProto>() {
public GetServiceStatusResponseProto parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new GetServiceStatusResponseProto(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<GetServiceStatusResponseProto> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .hadoop.common.HAServiceStateProto state = 1;
public static final int STATE_FIELD_NUMBER = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto state_;
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto getState() {
return state_;
}
// optional bool readyToBecomeActive = 2;
public static final int READYTOBECOMEACTIVE_FIELD_NUMBER = 2;
private boolean readyToBecomeActive_;
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
public boolean hasReadyToBecomeActive() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
public boolean getReadyToBecomeActive() {
return readyToBecomeActive_;
}
// optional string notReadyReason = 3;
public static final int NOTREADYREASON_FIELD_NUMBER = 3;
private java.lang.Object notReadyReason_;
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public boolean hasNotReadyReason() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public java.lang.String getNotReadyReason() {
java.lang.Object ref = notReadyReason_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
notReadyReason_ = s;
}
return s;
}
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public com.google.protobuf.ByteString
getNotReadyReasonBytes() {
java.lang.Object ref = notReadyReason_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
notReadyReason_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
state_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto.INITIALIZING;
readyToBecomeActive_ = false;
notReadyReason_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeEnum(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeBool(2, readyToBecomeActive_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBytes(3, getNotReadyReasonBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(1, state_.getNumber());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(2, readyToBecomeActive_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(3, getNotReadyReasonBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto)) {
return super.equals(obj);
}
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto other = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto) obj;
boolean result = true;
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasReadyToBecomeActive() == other.hasReadyToBecomeActive());
if (hasReadyToBecomeActive()) {
result = result && (getReadyToBecomeActive()
== other.getReadyToBecomeActive());
}
result = result && (hasNotReadyReason() == other.hasNotReadyReason());
if (hasNotReadyReason()) {
result = result && getNotReadyReason()
.equals(other.getNotReadyReason());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasReadyToBecomeActive()) {
hash = (37 * hash) + READYTOBECOMEACTIVE_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getReadyToBecomeActive());
}
if (hasNotReadyReason()) {
hash = (37 * hash) + NOTREADYREASON_FIELD_NUMBER;
hash = (53 * hash) + getNotReadyReason().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code hadoop.common.GetServiceStatusResponseProto}
*
* <pre>
**
* Returns the state of the service
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProtoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusResponseProto_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusResponseProto_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.class, org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.Builder.class);
}
// Construct using org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
state_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto.INITIALIZING;
bitField0_ = (bitField0_ & ~0x00000001);
readyToBecomeActive_ = false;
bitField0_ = (bitField0_ & ~0x00000002);
notReadyReason_ = "";
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.internal_static_hadoop_common_GetServiceStatusResponseProto_descriptor;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto getDefaultInstanceForType() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance();
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto build() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto buildPartial() {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto result = new org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.readyToBecomeActive_ = readyToBecomeActive_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.notReadyReason_ = notReadyReason_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto) {
return mergeFrom((org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto other) {
if (other == org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance()) return this;
if (other.hasState()) {
setState(other.getState());
}
if (other.hasReadyToBecomeActive()) {
setReadyToBecomeActive(other.getReadyToBecomeActive());
}
if (other.hasNotReadyReason()) {
bitField0_ |= 0x00000004;
notReadyReason_ = other.notReadyReason_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasState()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .hadoop.common.HAServiceStateProto state = 1;
private org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto state_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto.INITIALIZING;
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
public boolean hasState() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto getState() {
return state_;
}
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
public Builder setState(org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
state_ = value;
onChanged();
return this;
}
/**
* required .hadoop.common.HAServiceStateProto state = 1;
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto.INITIALIZING;
onChanged();
return this;
}
// optional bool readyToBecomeActive = 2;
private boolean readyToBecomeActive_ ;
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
public boolean hasReadyToBecomeActive() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
public boolean getReadyToBecomeActive() {
return readyToBecomeActive_;
}
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
public Builder setReadyToBecomeActive(boolean value) {
bitField0_ |= 0x00000002;
readyToBecomeActive_ = value;
onChanged();
return this;
}
/**
* optional bool readyToBecomeActive = 2;
*
*
* If state is STANDBY, indicate whether it is
* ready to become active.
*
*/
public Builder clearReadyToBecomeActive() {
bitField0_ = (bitField0_ & ~0x00000002);
readyToBecomeActive_ = false;
onChanged();
return this;
}
// optional string notReadyReason = 3;
private java.lang.Object notReadyReason_ = "";
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public boolean hasNotReadyReason() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public java.lang.String getNotReadyReason() {
java.lang.Object ref = notReadyReason_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
notReadyReason_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public com.google.protobuf.ByteString
getNotReadyReasonBytes() {
java.lang.Object ref = notReadyReason_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
notReadyReason_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public Builder setNotReadyReason(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
notReadyReason_ = value;
onChanged();
return this;
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public Builder clearNotReadyReason() {
bitField0_ = (bitField0_ & ~0x00000004);
notReadyReason_ = getDefaultInstance().getNotReadyReason();
onChanged();
return this;
}
/**
* optional string notReadyReason = 3;
*
*
* If not ready to become active, a textual explanation of why not
*
*/
public Builder setNotReadyReasonBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000004;
notReadyReason_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hadoop.common.GetServiceStatusResponseProto)
}
static {
defaultInstance = new GetServiceStatusResponseProto(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:hadoop.common.GetServiceStatusResponseProto)
}
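// --- Illustrative sketch (not part of the generated file): building a status response ---
// A minimal example of how the GetServiceStatusResponseProto.Builder above might be
// used; the STANDBY state and the reason string are hypothetical values chosen for
// illustration only.
private static GetServiceStatusResponseProto exampleBuildStatusResponse() {
  return GetServiceStatusResponseProto.newBuilder()
      .setState(HAServiceStateProto.STANDBY)                  // required field
      .setReadyToBecomeActive(false)                          // optional; meaningful for STANDBY
      .setNotReadyReason("edit log tail still catching up")   // optional explanation (hypothetical)
      .build();                                               // throws if required 'state' is unset
}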
/**
* Protobuf service {@code hadoop.common.HAServiceProtocolService}
*
*
**
* Protocol interface provides High availability related
* primitives to monitor and failover a service.
*
* For details see o.a.h.ha.HAServiceProtocol.
*
*/
public static abstract class HAServiceProtocolService
implements com.google.protobuf.Service {
protected HAServiceProtocolService() {}
public interface Interface {
/**
* rpc monitorHealth(.hadoop.common.MonitorHealthRequestProto) returns (.hadoop.common.MonitorHealthResponseProto);
*
*
**
* Monitor the health of a service.
*
*/
public abstract void monitorHealth(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto> done);
/**
* rpc transitionToActive(.hadoop.common.TransitionToActiveRequestProto) returns (.hadoop.common.TransitionToActiveResponseProto);
*
*
**
* Request service to transition to active state.
*
*/
public abstract void transitionToActive(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto> done);
/**
* rpc transitionToStandby(.hadoop.common.TransitionToStandbyRequestProto) returns (.hadoop.common.TransitionToStandbyResponseProto);
*
*
**
* Request service to transition to standby state.
*
*/
public abstract void transitionToStandby(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto> done);
/**
* rpc transitionToObserver(.hadoop.common.TransitionToObserverRequestProto) returns (.hadoop.common.TransitionToObserverResponseProto);
*
*
**
* Request service to transition to observer state.
*
*/
public abstract void transitionToObserver(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto> done);
/**
* rpc getServiceStatus(.hadoop.common.GetServiceStatusRequestProto) returns (.hadoop.common.GetServiceStatusResponseProto);
*
*
**
* Get the current status of the service.
*
*/
public abstract void getServiceStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto> done);
}
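// --- Illustrative sketch (not part of the generated file): a no-op service wiring ---
// A hypothetical Interface implementation that answers every call with a default
// response, exposed through newReflectiveService(); a real service would delegate
// to an org.apache.hadoop.ha.HAServiceProtocol implementation instead.
private static com.google.protobuf.Service exampleNoOpService() {
  return newReflectiveService(new Interface() {
    public void monitorHealth(com.google.protobuf.RpcController controller,
        MonitorHealthRequestProto request,
        com.google.protobuf.RpcCallback<MonitorHealthResponseProto> done) {
      done.run(MonitorHealthResponseProto.getDefaultInstance());
    }
    public void transitionToActive(com.google.protobuf.RpcController controller,
        TransitionToActiveRequestProto request,
        com.google.protobuf.RpcCallback<TransitionToActiveResponseProto> done) {
      done.run(TransitionToActiveResponseProto.getDefaultInstance());
    }
    public void transitionToStandby(com.google.protobuf.RpcController controller,
        TransitionToStandbyRequestProto request,
        com.google.protobuf.RpcCallback<TransitionToStandbyResponseProto> done) {
      done.run(TransitionToStandbyResponseProto.getDefaultInstance());
    }
    public void transitionToObserver(com.google.protobuf.RpcController controller,
        TransitionToObserverRequestProto request,
        com.google.protobuf.RpcCallback<TransitionToObserverResponseProto> done) {
      done.run(TransitionToObserverResponseProto.getDefaultInstance());
    }
    public void getServiceStatus(com.google.protobuf.RpcController controller,
        GetServiceStatusRequestProto request,
        com.google.protobuf.RpcCallback<GetServiceStatusResponseProto> done) {
      // The response's 'state' field is required, so build a valid message here.
      done.run(GetServiceStatusResponseProto.newBuilder()
          .setState(HAServiceStateProto.INITIALIZING).build());
    }
  });
}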
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new HAServiceProtocolService() {
@java.lang.Override
public void monitorHealth(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto> done) {
impl.monitorHealth(controller, request, done);
}
@java.lang.Override
public void transitionToActive(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto> done) {
impl.transitionToActive(controller, request, done);
}
@java.lang.Override
public void transitionToStandby(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto> done) {
impl.transitionToStandby(controller, request, done);
}
@java.lang.Override
public void transitionToObserver(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto> done) {
impl.transitionToObserver(controller, request, done);
}
@java.lang.Override
public void getServiceStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto> done) {
impl.getServiceStatus(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.monitorHealth(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto)request);
case 1:
return impl.transitionToActive(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto)request);
case 2:
return impl.transitionToStandby(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto)request);
case 3:
return impl.transitionToObserver(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto)request);
case 4:
return impl.getServiceStatus(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
/**
* rpc monitorHealth(.hadoop.common.MonitorHealthRequestProto) returns (.hadoop.common.MonitorHealthResponseProto);
*
*
**
* Monitor the health of a service.
*
*/
public abstract void monitorHealth(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto> done);
/**
* rpc transitionToActive(.hadoop.common.TransitionToActiveRequestProto) returns (.hadoop.common.TransitionToActiveResponseProto);
*
*
**
* Request service to transition to active state.
*
*/
public abstract void transitionToActive(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto> done);
/**
* rpc transitionToStandby(.hadoop.common.TransitionToStandbyRequestProto) returns (.hadoop.common.TransitionToStandbyResponseProto);
*
*
**
* Request service to transition to standby state.
*
*/
public abstract void transitionToStandby(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto> done);
/**
* rpc transitionToObserver(.hadoop.common.TransitionToObserverRequestProto) returns (.hadoop.common.TransitionToObserverResponseProto);
*
*
**
* Request service to transition to observer state.
*
*/
public abstract void transitionToObserver(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto> done);
/**
* rpc getServiceStatus(.hadoop.common.GetServiceStatusRequestProto) returns (.hadoop.common.GetServiceStatusResponseProto);
*
*
**
* Get the current status of the service.
*
*/
public abstract void getServiceStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.getDescriptor().getServices().get(0);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.monitorHealth(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.transitionToActive(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 2:
this.transitionToStandby(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 3:
this.transitionToObserver(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 4:
this.getServiceStatus(controller, (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void monitorHealth(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.class,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance()));
}
public void transitionToActive(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.class,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance()));
}
public void transitionToStandby(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.class,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance()));
}
public void transitionToObserver(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.class,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance()));
}
public void getServiceStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.class,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance()));
}
}
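// --- Illustrative sketch (not part of the generated file): asynchronous status query ---
// Assumes a com.google.protobuf.RpcChannel and RpcController are supplied by the
// caller (in Hadoop these come from the RPC engine); the callback simply prints the
// reported HA state.
private static void exampleAsyncStatusQuery(
    com.google.protobuf.RpcChannel channel,
    com.google.protobuf.RpcController controller) {
  Stub stub = newStub(channel);
  stub.getServiceStatus(controller,
      GetServiceStatusRequestProto.getDefaultInstance(),
      new com.google.protobuf.RpcCallback<GetServiceStatusResponseProto>() {
        public void run(GetServiceStatusResponseProto response) {
          System.out.println("HA state: " + response.getState());
        }
      });
}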
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto monitorHealth(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto transitionToActive(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto transitionToStandby(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto transitionToObserver(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto getServiceStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto request)
throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto monitorHealth(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto transitionToActive(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto transitionToStandby(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto transitionToObserver(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToObserverResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto getServiceStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto.getDefaultInstance());
}
}
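// --- Illustrative sketch (not part of the generated file): blocking status query ---
// Assumes a com.google.protobuf.BlockingRpcChannel is already available; passing a
// null RpcController mirrors common Hadoop usage but is an assumption here, not a
// requirement of this generated class.
private static HAServiceStateProto exampleBlockingStatusQuery(
    com.google.protobuf.BlockingRpcChannel channel)
    throws com.google.protobuf.ServiceException {
  BlockingInterface stub = newBlockingStub(channel);
  GetServiceStatusResponseProto status = stub.getServiceStatus(
      null, GetServiceStatusRequestProto.getDefaultInstance());
  return status.getState();
}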
// @@protoc_insertion_point(class_scope:hadoop.common.HAServiceProtocolService)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_HAStateChangeRequestInfoProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_HAStateChangeRequestInfoProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_MonitorHealthRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_MonitorHealthRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_MonitorHealthResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_MonitorHealthResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_TransitionToActiveRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_TransitionToActiveRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_TransitionToActiveResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_TransitionToActiveResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_TransitionToStandbyRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_TransitionToStandbyRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_TransitionToStandbyResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_TransitionToStandbyResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_TransitionToObserverRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_TransitionToObserverRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_TransitionToObserverResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_TransitionToObserverResponseProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_GetServiceStatusRequestProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_GetServiceStatusRequestProto_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_hadoop_common_GetServiceStatusResponseProto_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hadoop_common_GetServiceStatusResponseProto_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\027HAServiceProtocol.proto\022\rhadoop.common" +
"\"R\n\035HAStateChangeRequestInfoProto\0221\n\treq" +
"Source\030\001 \002(\0162\036.hadoop.common.HARequestSo" +
"urce\"\033\n\031MonitorHealthRequestProto\"\034\n\032Mon" +
"itorHealthResponseProto\"_\n\036TransitionToA" +
"ctiveRequestProto\022=\n\007reqInfo\030\001 \002(\0132,.had" +
"oop.common.HAStateChangeRequestInfoProto" +
"\"!\n\037TransitionToActiveResponseProto\"`\n\037T" +
"ransitionToStandbyRequestProto\022=\n\007reqInf" +
"o\030\001 \002(\0132,.hadoop.common.HAStateChangeReq",
"uestInfoProto\"\"\n TransitionToStandbyResp" +
"onseProto\"a\n TransitionToObserverRequest" +
"Proto\022=\n\007reqInfo\030\001 \002(\0132,.hadoop.common.H" +
"AStateChangeRequestInfoProto\"#\n!Transiti" +
"onToObserverResponseProto\"\036\n\034GetServiceS" +
"tatusRequestProto\"\207\001\n\035GetServiceStatusRe" +
"sponseProto\0221\n\005state\030\001 \002(\0162\".hadoop.comm" +
"on.HAServiceStateProto\022\033\n\023readyToBecomeA" +
"ctive\030\002 \001(\010\022\026\n\016notReadyReason\030\003 \001(\t*N\n\023H" +
"AServiceStateProto\022\020\n\014INITIALIZING\020\000\022\n\n\006",
"ACTIVE\020\001\022\013\n\007STANDBY\020\002\022\014\n\010OBSERVER\020\003*W\n\017H" +
"ARequestSource\022\023\n\017REQUEST_BY_USER\020\000\022\032\n\026R" +
"EQUEST_BY_USER_FORCED\020\001\022\023\n\017REQUEST_BY_ZK" +
"FC\020\0022\327\004\n\030HAServiceProtocolService\022d\n\rmon" +
"itorHealth\022(.hadoop.common.MonitorHealth" +
"RequestProto\032).hadoop.common.MonitorHeal" +
"thResponseProto\022s\n\022transitionToActive\022-." +
"hadoop.common.TransitionToActiveRequestP" +
"roto\032..hadoop.common.TransitionToActiveR" +
"esponseProto\022v\n\023transitionToStandby\022..ha",
"doop.common.TransitionToStandbyRequestPr" +
"oto\032/.hadoop.common.TransitionToStandbyR" +
"esponseProto\022y\n\024transitionToObserver\022/.h" +
"adoop.common.TransitionToObserverRequest" +
"Proto\0320.hadoop.common.TransitionToObserv" +
"erResponseProto\022m\n\020getServiceStatus\022+.ha" +
"doop.common.GetServiceStatusRequestProto" +
"\032,.hadoop.common.GetServiceStatusRespons" +
"eProtoB;\n\032org.apache.hadoop.ha.protoB\027HA" +
"ServiceProtocolProtos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_hadoop_common_HAStateChangeRequestInfoProto_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_hadoop_common_HAStateChangeRequestInfoProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_HAStateChangeRequestInfoProto_descriptor,
new java.lang.String[] { "ReqSource", });
internal_static_hadoop_common_MonitorHealthRequestProto_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_hadoop_common_MonitorHealthRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_MonitorHealthRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_common_MonitorHealthResponseProto_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_hadoop_common_MonitorHealthResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_MonitorHealthResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_common_TransitionToActiveRequestProto_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_hadoop_common_TransitionToActiveRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_TransitionToActiveRequestProto_descriptor,
new java.lang.String[] { "ReqInfo", });
internal_static_hadoop_common_TransitionToActiveResponseProto_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_hadoop_common_TransitionToActiveResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_TransitionToActiveResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_common_TransitionToStandbyRequestProto_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hadoop_common_TransitionToStandbyRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_TransitionToStandbyRequestProto_descriptor,
new java.lang.String[] { "ReqInfo", });
internal_static_hadoop_common_TransitionToStandbyResponseProto_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_hadoop_common_TransitionToStandbyResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_TransitionToStandbyResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_common_TransitionToObserverRequestProto_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_hadoop_common_TransitionToObserverRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_TransitionToObserverRequestProto_descriptor,
new java.lang.String[] { "ReqInfo", });
internal_static_hadoop_common_TransitionToObserverResponseProto_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_hadoop_common_TransitionToObserverResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_TransitionToObserverResponseProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_common_GetServiceStatusRequestProto_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_hadoop_common_GetServiceStatusRequestProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_GetServiceStatusRequestProto_descriptor,
new java.lang.String[] { });
internal_static_hadoop_common_GetServiceStatusResponseProto_descriptor =
getDescriptor().getMessageTypes().get(10);
internal_static_hadoop_common_GetServiceStatusResponseProto_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hadoop_common_GetServiceStatusResponseProto_descriptor,
new java.lang.String[] { "State", "ReadyToBecomeActive", "NotReadyReason", });
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}